diff --git a/.github/workflows/run-adaptivity-tests-parallel.yml b/.github/workflows/run-adaptivity-tests-parallel.yml
index ef3ed1c..dd4b710 100644
--- a/.github/workflows/run-adaptivity-tests-parallel.yml
+++ b/.github/workflows/run-adaptivity-tests-parallel.yml
@@ -43,7 +43,7 @@ jobs:
working-directory: micro-manager/tests/integration/test_unit_cube
run: |
mpiexec -n 2 --allow-run-as-root micro-manager-precice micro-manager-config-global-adaptivity-parallel.json &
- python3 unit_cube.py
+ python3 unit_cube.py 2
adaptivity_unit_tests_parallel:
name: Adaptivity unit tests in parallel
diff --git a/.github/workflows/run-adaptivity-tests-serial.yml b/.github/workflows/run-adaptivity-tests-serial.yml
index 391968b..69b0de5 100644
--- a/.github/workflows/run-adaptivity-tests-serial.yml
+++ b/.github/workflows/run-adaptivity-tests-serial.yml
@@ -34,14 +34,14 @@ jobs:
working-directory: micro-manager/tests/integration/test_unit_cube
run: |
micro-manager-precice micro-manager-config-local-adaptivity.json &
- python3 unit_cube.py
+ python3 unit_cube.py 2
- name: Run integration test with global adaptivity
timeout-minutes: 3
working-directory: micro-manager/tests/integration/test_unit_cube
run: |
micro-manager-precice micro-manager-config-global-adaptivity.json &
- python3 unit_cube.py
+ python3 unit_cube.py 2
adaptivity_unit_tests_serial:
name: Run adaptivity unit tests in serial
diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml
index e256341..2a61090 100644
--- a/.github/workflows/run-domain-decomposition-tests.yml
+++ b/.github/workflows/run-domain-decomposition-tests.yml
@@ -8,50 +8,6 @@ on:
branches:
- "*"
jobs:
- domain_decomposition_integration_tests:
- name: Run domain decomposition integration tests
- runs-on: ubuntu-latest
- container: precice/precice:nightly
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- path: micro-manager
-
- - name: Install sudo for MPI
- working-directory: micro-manager
- run: |
- apt-get -qq update
- apt-get -qq install sudo
-
- - name: Use mpi4py
- uses: mpi4py/setup-mpi@v1
-
- - name: Install Dependencies
- working-directory: micro-manager
- run: |
- apt-get -qq update
- apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- pip3 install --upgrade pip
-
- - name: Install micro-manager
- working-directory: micro-manager
- run: pip3 install .
-
- - name: Run integration test (2 processes)
- timeout-minutes: 3
- working-directory: micro-manager/tests/integration/test_unit_cube
- run: |
- mpiexec -n 2 --allow-run-as-root micro-manager-precice micro-manager-config-parallel-1.json &
- python3 unit_cube.py
-
- - name: Run integration test (6 processes)
- timeout-minutes: 3
- working-directory: micro-manager/tests/integration/test_unit_cube
- run: |
- mpiexec -n 6 --oversubscribe --allow-run-as-root micro-manager-precice micro-manager-config-parallel-2.json &
- python3 unit_cube.py
-
domain_decomposition_unit_tests:
name: Run domain decomposition unit tests
runs-on: ubuntu-latest
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7926ed9..64032fb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
## latest
+- Fix bug in how the adaptivity computation is called in explicit coupling scenarios https://github.com/precice/micro-manager/pull/145
- Fix bug in handling of vector data returned by the MicroSimulation `solve()` method, for scenarios with adaptivity https://github.com/precice/micro-manager/pull/143
- Remove the `scalar` and `vector` keyword values from data names in configuration https://github.com/precice/micro-manager/pull/142
- Set default logger to stdout and add output directory setting option for file loggers https://github.com/precice/micro-manager/pull/139
diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py
index 116d594..d2c9e13 100644
--- a/micro_manager/micro_manager.py
+++ b/micro_manager/micro_manager.py
@@ -162,6 +162,7 @@ def solve(self) -> None:
)
adaptivity_cpu_time = 0.0
+ first_iteration = True
while self._participant.is_coupling_ongoing():
@@ -173,30 +174,29 @@ def solve(self) -> None:
sim_states_cp[i] = self._micro_sims[i].get_state()
t_checkpoint = t
n_checkpoint = n
+ first_iteration = True
- if self._is_adaptivity_on:
- if not self._adaptivity_in_every_implicit_step:
- start_time = time.process_time()
- self._adaptivity_controller.compute_adaptivity(
- dt,
- self._micro_sims,
- self._data_for_adaptivity,
- )
+ if self._is_adaptivity_on:
+ if self._adaptivity_in_every_implicit_step or first_iteration:
+ start_time = time.process_time()
+ self._adaptivity_controller.compute_adaptivity(
+ dt,
+ self._micro_sims,
+ self._data_for_adaptivity,
+ )
- end_time = time.process_time()
+ end_time = time.process_time()
- adaptivity_cpu_time = end_time - start_time
+ adaptivity_cpu_time = end_time - start_time
- # Only checkpoint the adaptivity configuration if adaptivity is computed
- # once in every time window
- self._adaptivity_controller.write_checkpoint()
+ # Only checkpoint the adaptivity configuration if adaptivity is computed
+ # once in every time window
+ self._adaptivity_controller.write_checkpoint()
- active_sim_ids = (
- self._adaptivity_controller.get_active_sim_ids()
- )
+ active_sim_ids = self._adaptivity_controller.get_active_sim_ids()
- for active_id in active_sim_ids:
- self._micro_sims_active_steps[active_id] += 1
+ for active_id in active_sim_ids:
+ self._micro_sims_active_steps[active_id] += 1
micro_sims_input = self._read_data_from_precice(dt)
@@ -243,6 +243,7 @@ def solve(self) -> None:
self._micro_sims[i].set_state(sim_states_cp[i])
n = n_checkpoint
t = t_checkpoint
+ first_iteration = False
# If adaptivity is computed only once per time window, the states of sims need to be reset too
if self._is_adaptivity_on:
@@ -462,12 +463,7 @@ def initialize(self) -> None:
if is_initial_data_required and not is_initial_data_available:
raise Exception(
- "The initialize() method of the Micro simulation requires initial data, but no initial data has been provided."
- )
-
- if not is_initial_data_required and is_initial_data_available:
- warn(
- "The initialize() method is only allowed to return data which is required for the adaptivity calculation."
+ "The initialize() method of the Micro simulation requires initial data, but no initial macro data has been provided."
)
# Get initial data from micro simulations if initialize() method exists
diff --git a/tests/integration/test_unit_cube/clean-test.sh b/tests/integration/test_unit_cube/clean-test.sh
index 60aa7a1..bcf78bd 100755
--- a/tests/integration/test_unit_cube/clean-test.sh
+++ b/tests/integration/test_unit_cube/clean-test.sh
@@ -1,12 +1,9 @@
rm -fv *-events-summary.json
rm -fv *-events.json
rm -fv *.log
-rm -r -fv precice-run/
-rm -r -fv precice-profiling/
+rm -rfv precice-run/ precice-profiling/
rm -fv *.vtk
rm -fv *.out
rm -fv *.err
-rm -fv output/*.vtu
-rm -fv output/*.pvtu
-rm -r -fv __pycache__
+rm -rfv __pycache__ output/ .venv/ adaptivity_output/
rm -fv *.csv
diff --git a/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json
index 01d58cc..726db4c 100644
--- a/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json
+++ b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json
@@ -4,8 +4,8 @@
"coupling_params": {
"precice_config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-cube-mesh",
- "read_data_names": ["macro-scalar-data", "macro-vector-data"],
- "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ "write_data_names": ["micro-data-1", "micro-data-2"],
+ "read_data_names": ["macro-data-1"]
},
"simulation_params": {
"micro_dt": 1.0,
@@ -14,11 +14,11 @@
"adaptivity": "True",
"adaptivity_settings": {
"type": "global",
- "data": ["macro-scalar-data", "micro-vector-data"],
- "history_param": 0.5,
+ "data": ["micro-data-1", "micro-data-2"],
+ "history_param": 1.0,
"coarsening_constant": 0.3,
"refining_constant": 0.4,
- "every_implicit_iteration": "True",
+ "every_implicit_iteration": "False",
"output_cpu_time": "True"
}
},
diff --git a/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json
index 67d4534..a027dc1 100644
--- a/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json
+++ b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json
@@ -3,8 +3,8 @@
"coupling_params": {
"precice_config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-cube-mesh",
- "read_data_names": ["macro-scalar-data", "macro-vector-data"],
- "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ "write_data_names": ["micro-data-1", "micro-data-2"],
+ "read_data_names": ["macro-data-1"]
},
"simulation_params": {
"micro_dt": 1.0,
@@ -12,7 +12,7 @@
"adaptivity": "True",
"adaptivity_settings": {
"type": "global",
- "data": ["macro-scalar-data", "macro-vector-data"],
+ "data": ["micro-data-1", "micro-data-2"],
"history_param": 0.5,
"coarsening_constant": 0.3,
"refining_constant": 0.4,
diff --git a/tests/integration/test_unit_cube/micro-manager-config-load-balancing.json b/tests/integration/test_unit_cube/micro-manager-config-load-balancing.json
new file mode 100644
index 0000000..ddc8a9f
--- /dev/null
+++ b/tests/integration/test_unit_cube/micro-manager-config-load-balancing.json
@@ -0,0 +1,34 @@
+{
+ "micro_file_name": "micro_dummy",
+ "output_dir": "adaptivity_output",
+ "coupling_params": {
+ "precice_config_file_name": "precice-config.xml",
+ "macro_mesh_name": "macro-cube-mesh",
+ "read_data_names": ["macro-scalar-data", "macro-vector-data"],
+ "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ },
+ "simulation_params": {
+ "micro_dt": 1.0,
+ "macro_domain_bounds": [0, 1, 0, 1, 0, 1],
+ "decomposition": [2, 1, 1],
+ "adaptivity": "True",
+ "adaptivity_settings": {
+ "type": "global",
+ "load_balancing": "True",
+ "load_balancing_settings": {
+ "load_balancing_n": 5,
+ "two_step_load_balancing": "True",
+ "balancing_threshold": 2
+ },
+ "data": ["macro-scalar-data", "micro-vector-data"],
+ "history_param": 0.5,
+ "coarsening_constant": 0.3,
+ "refining_constant": 0.4,
+ "every_implicit_iteration": "True",
+ "output_cpu_time": "True"
+ }
+ },
+ "diagnostics": {
+ "output_micro_sim_solve_time": "True"
+ }
+}
diff --git a/tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json b/tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json
index e130537..3308e07 100644
--- a/tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json
+++ b/tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json
@@ -3,8 +3,8 @@
"coupling_params": {
"precice_config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-cube-mesh",
- "read_data_names": ["macro-scalar-data", "macro-vector-data"],
- "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ "write_data_names": ["micro-data-1", "micro-data-2"],
+ "read_data_names": ["macro-data-1"]
},
"simulation_params": {
"micro_dt": 1.0,
@@ -12,7 +12,7 @@
"adaptivity": "True",
"adaptivity_settings": {
"type": "local",
- "data": ["macro-scalar-data", "macro-vector-data"],
+ "data": ["micro-data-1", "micro-data-2"],
"history_param": 0.5,
"coarsening_constant": 0.3,
"refining_constant": 0.4,
diff --git a/tests/integration/test_unit_cube/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube/micro-manager-config-parallel-1.json
index e5feb34..fa6c1d4 100644
--- a/tests/integration/test_unit_cube/micro-manager-config-parallel-1.json
+++ b/tests/integration/test_unit_cube/micro-manager-config-parallel-1.json
@@ -3,8 +3,8 @@
"coupling_params": {
"precice_config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-cube-mesh",
- "read_data_names": ["macro-scalar-data", "macro-vector-data"],
- "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ "write_data_names": ["micro-data-1", "micro-data-2"],
+ "read_data_names": ["macro-data-1"]
},
"simulation_params": {
"micro_dt": 1.0,
diff --git a/tests/integration/test_unit_cube/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube/micro-manager-config-parallel-2.json
index 3a4d067..565cd20 100644
--- a/tests/integration/test_unit_cube/micro-manager-config-parallel-2.json
+++ b/tests/integration/test_unit_cube/micro-manager-config-parallel-2.json
@@ -3,8 +3,8 @@
"coupling_params": {
"precice_config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-cube-mesh",
- "read_data_names": ["macro-scalar-data", "macro-vector-data"],
- "write_data_names": ["micro-scalar-data", "micro-vector-data"]
+ "write_data_names": ["micro-data-1", "micro-data-2"],
+ "read_data_names": ["macro-data-1"]
},
"simulation_params": {
"micro_dt": 1.0,
diff --git a/tests/integration/test_unit_cube/micro_dummy.py b/tests/integration/test_unit_cube/micro_dummy.py
index 6c10819..44173ff 100644
--- a/tests/integration/test_unit_cube/micro_dummy.py
+++ b/tests/integration/test_unit_cube/micro_dummy.py
@@ -2,6 +2,9 @@
Micro simulation
In this script we solve a dummy micro problem to just show the working of the macro-micro coupling
"""
+import copy
+import random
+import time
class MicroSimulation:
@@ -10,28 +13,38 @@ def __init__(self, sim_id):
Constructor of MicroSimulation class.
"""
self._sim_id = sim_id
- self._micro_scalar_data = None
- self._micro_vector_data = None
- self._checkpoint = None
+
+ sim_types = [4, 88, 37, 12, 1, 23, 134]
+
+ self._this_sim_type = random.choice(sim_types)
+
+ # Artificial state of 100 floats
+ self._state = [x * 0.1 for x in range(100)]
def initialize(self):
- self._micro_scalar_data = 0
- self._micro_vector_data = []
- self._checkpoint = 0
+ return {
+ "micro-data-1": self._this_sim_type * 0.5,
+ "micro-data-2": [
+ self._this_sim_type * 2,
+ self._this_sim_type * 3,
+ self._this_sim_type * 4,
+ ],
+ }
def solve(self, macro_data, dt):
- assert dt != 0
- self._micro_vector_data = macro_data["macro-vector-data"]
- self._micro_scalar_data = macro_data["macro-scalar-data"]
+ time.sleep(self._this_sim_type * 0.001)
return {
- "micro-scalar-data": self._micro_scalar_data,
- "micro-vector-data": self._micro_vector_data,
+ "micro-data-1": self._this_sim_type * 0.5,
+ "micro-data-2": [
+ self._this_sim_type * 2,
+ self._this_sim_type * 3,
+ self._this_sim_type * 4,
+ ],
}
def get_state(self):
- return [self._micro_scalar_data, self._micro_vector_data]
+ return copy.deepcopy(self._state)
def set_state(self, state):
- self._micro_scalar_data = state[0]
- self._micro_vector_data = state[0]
+ self._state = copy.deepcopy(state)
diff --git a/tests/integration/test_unit_cube/precice-config.xml b/tests/integration/test_unit_cube/precice-config.xml
index 91395c0..5bccfe8 100644
--- a/tests/integration/test_unit_cube/precice-config.xml
+++ b/tests/integration/test_unit_cube/precice-config.xml
@@ -4,20 +4,18 @@
 [changed XML lines not recoverable: element content was stripped during extraction]
@@ -26,18 +24,16 @@
 [changed XML lines not recoverable: element content was stripped during extraction]
@@ -51,25 +47,8 @@
 [changed XML lines not recoverable: element content was stripped during extraction]
diff --git a/tests/integration/test_unit_cube/unit_cube.py b/tests/integration/test_unit_cube/unit_cube.py
index b87f005..86fe683 100644
--- a/tests/integration/test_unit_cube/unit_cube.py
+++ b/tests/integration/test_unit_cube/unit_cube.py
@@ -1,6 +1,7 @@
#! /usr/bin/env python3
#
+import argparse
import numpy as np
import precice
@@ -9,19 +10,20 @@ def main():
"""
Dummy macro simulation which is coupled to a set of micro simulations via preCICE and the Micro Manager
"""
- n = n_checkpoint = 0
- t = t_checkpoint = 0
- t_end = 10
+ parser = argparse.ArgumentParser(description="Macro simulation")
+ parser.add_argument("np_axis", type=int, help="Number of points in each axis")
+ args = parser.parse_args()
+
+ t = 0
# preCICE setup
participant = precice.Participant("macro-cube", "precice-config.xml", 0, 1)
mesh_name = "macro-cube-mesh"
- read_data_names = {"micro-scalar-data": 0, "micro-vector-data": 1}
- write_data_names = {"macro-scalar-data": 0, "macro-vector-data": 1}
+ read_data_names = {"micro-data-1": 0, "micro-data-2": 1}
# Coupling mesh - unit cube with 5 points in each direction
- np_axis = 5
+ np_axis = args.np_axis
x_coords, y_coords, z_coords = np.meshgrid(
np.linspace(0, 1, np_axis),
np.linspace(0, 1, np_axis),
@@ -43,29 +45,6 @@ def main():
# Define points on entire domain as coupling mesh
vertex_ids = participant.set_mesh_vertices(mesh_name, coords)
- write_data = []
- write_data.append(np.zeros(nv))
- write_data.append(np.zeros((nv, participant.get_mesh_dimensions(mesh_name))))
-
- # Define initial data to write to preCICE
- scalar_value = 1.0
- vector_value = [2.0, 3.0, 4.0]
- for z in range(np_axis):
- for y in range(np_axis):
- for x in range(np_axis):
- n = x + y * np_axis + z * np_axis * np_axis
- write_data[0][n] = scalar_value
- write_data[1][n, 0] = vector_value[0]
- write_data[1][n, 1] = vector_value[1]
- write_data[1][n, 2] = vector_value[2]
- scalar_value += 1
- vector_value = [x + 1 for x in vector_value]
-
- # Write initial data to preCICE
- if participant.requires_initial_data():
- for count, data_name in enumerate(write_data_names.keys()):
- participant.write_data(mesh_name, data_name, vertex_ids, write_data[count])
-
participant.initialize()
read_data = [None, None]
@@ -73,51 +52,19 @@ def main():
# time loop
while participant.is_coupling_ongoing():
- # write checkpoint
- if participant.requires_writing_checkpoint():
- print("Saving macro state")
- t_checkpoint = t
- n_checkpoint = n
-
# Read data from preCICE
for count, data_name in enumerate(read_data_names.keys()):
read_data[count] = participant.read_data(
mesh_name, data_name, vertex_ids, 1.0
)
- # Set the read data as the write data with an increment
- write_data[0] = read_data[0] + 1
- write_data[1] = read_data[1] + 1
-
- # Define new data to write to preCICE midway through the simulation
- if t == t_end / 2:
- scalar_value = 1.0
- vector_value = [2.0, 3.0, 4.0]
- for z in range(np_axis):
- for y in range(np_axis):
- for x in range(np_axis):
- n = x + y * np_axis + z * np_axis * np_axis
- write_data[0][n] = scalar_value
- write_data[1][n, 0] = vector_value[0]
- write_data[1][n, 1] = vector_value[1]
- write_data[1][n, 2] = vector_value[2]
-
- # Write data to preCICE
- for count, data_name in enumerate(write_data_names.keys()):
- participant.write_data(mesh_name, data_name, vertex_ids, write_data[count])
+ participant.write_data(mesh_name, "macro-data-1", vertex_ids, np.ones(nv))
participant.advance(dt)
dt = participant.get_max_time_step_size()
- # advance variables
- n += 1
t += dt
- if participant.requires_reading_checkpoint():
- print("Reverting to old macro state")
- t = t_checkpoint
- n = n_checkpoint
-
participant.finalize()