diff --git a/fidimag/common/chain_method_integrators.py b/fidimag/common/chain_method_integrators.py
index f434b412..738250bb 100644
--- a/fidimag/common/chain_method_integrators.py
+++ b/fidimag/common/chain_method_integrators.py
@@ -160,10 +160,10 @@ def __init__(self, ChainObj,
                  # band, forces, distances, rhs_fun, action_fun,
                  # n_images, n_dofs_image,
                  maxSteps=1000,
-                 maxCreep=5, actionTol=1e-10, forcesTol=1e-6,
-                 etaScale=1.0, dEta=2, minEta=0.001,
+                 maxCreep=6, actionTol=1e-10, forcesTol=1e-6,
+                 etaScale=1e-6, dEta=4., minEta=1e-6,
                  # perturbSeed=42, perturbFactor=0.1,
-                 nTrail=10, resetMax=20
+                 nTrail=13, resetMax=20
                  ):

         # super(FSIntegrator, self).__init__(band, rhs_fun)
@@ -188,17 +188,16 @@ def __init__(self, ChainObj,
         self.n_dofs_image = self.ChainObj.n_dofs_image

         # self.forces_prev = np.zeros_like(band).reshape(n_images, -1)
         # self.G :
-        self.forces = self.ChainObj.forces
+        self.forces = self.ChainObj.G
         self.distances = self.ChainObj.distances
-        self.forces_old = np.zeros_like(self.ChainObj.forces)
+        self.forces_old = np.zeros_like(self.ChainObj.G)

         # self.band should be just a reference to the band in the ChainObj
         self.band = self.ChainObj.band
-        self.band_old = np.zeros_like(self.band)
+        self.band_old = np.copy(self.band)

     def run_for(self, n_steps):
-        t = 0.0
         nStart = 0
         exitFlag = False
         totalRestart = True
@@ -214,19 +213,25 @@ def run_for(self, n_steps):

         INNER_DOFS = slice(self.n_dofs_image, -self.n_dofs_image)

+        # Save data of energies on every step
+        self.ChainObj.tablewriter.save()
+        self.ChainObj.tablewriter_dm.save()
+
+        np.save(self.ChainObj.name + '_init.npy', self.ChainObj.band)
+
         while not exitFlag:

             if totalRestart:
-                if self.step > 0:
+                if self.i_step > 0:
                     print('Restarting')
                 self.band[:] = self.band_old

                 # Compute from self.band. Do not update the step at this stage:
                 # This step updates the forces and distances in the G array of the nebm module,
                 # using the current band state self.y
-                # TODO: remove time from chain method rhs
-                # make a specific function to update G??
-                self.rhs(t, self.y)
+                # TODO: make a specific function to update G??
+                print('Computing forces')
+                self.ChainObj.nebm_step(self.band, ensure_zero_extrema=True)
                 self.action = self.ChainObj.compute_action()

                 # self.step += 1
@@ -250,13 +255,17 @@ def run_for(self, n_steps):
                 self.trailAction

                 self.ChainObj.nebm_step(self.band, ensure_zero_extrema=True)
-                self.action = self.ChainObj.action_fun()
+                self.action = self.ChainObj.compute_action()

                 self.trailAction[nStart] = self.action
                 nStart = next(trailPool)

                 self.i_step += 1

+                # Save data of energies on every step
+                self.ChainObj.tablewriter.save()
+                self.ChainObj.tablewriter_dm.save()
+
                 # Getting averages of forces from the INNER images in the band (no extrema)
                 # (forces are given by vector G in the chain method code)
                 # TODO: we might use all band images, not only inner ones, although G is zero at the extrema
@@ -268,10 +277,14 @@ def run_for(self, n_steps):
                 # Average step difference between trailing action and new action
                 deltaAction = (np.abs(self.trailAction[nStart] - self.action)) / self.nTrail

+                # print('trail Actions', self.trailAction)
+
                 # Print log
                 print(f'Step {self.i_step} ⟨RMS(G)〉= {mean_rms_G_norms_per_image:.5e} ',
                       f'deltaAction = {deltaAction:.5e} Creep n = {creepCount:>3} resetC = {resetCount:>3} ',
-                      f'eta = {eta:>5.4e}')
+                      f'eta = {eta:>5.4e} '
+                      f'action = {self.action:>5.4e} action_old = {self.action_old:>5.4e}'
+                      )

                 # 10 seems like a magic number; we set here a minimum number of evaluations
                 if (nStart > self.nTrail * 10) and (deltaAction < self.actionTol):
@@ -291,17 +304,25 @@ def run_for(self, n_steps):
                     eta = eta / (self.dEta * self.dEta)

                     # If eta is too small, reset and start again
-                    if (eta < 1e-3):
-                        print('')
+                    if (eta < self.minEta):
+                        # print('')
                         resetCount += 1
                         # bestAction = self.action_old
-                        self.refine_path(self.ChainObj.distances, self.band)  # Resets the path to equidistant structures (smoothing kinks?)
+                        print('Refining path')
+                        self.refine_path(self.ChainObj.path_distances, self.band)  # Resets the path to equidistant structures (smoothing kinks?)
                         # PathChanged[:] = True

                         if resetCount > self.resetMax:
-                            print('Failed to converge!')
-                    # Otherwise, just
+                            print('Failed to converge! Reached max number of restarts')
+                            exitFlag = True
+                            break  # creep loop
+
+                        totalRestart = True
+                        break  # creep loop
+
+                    # Otherwise, just start again with smaller alpha
                     else:
+                        print('Decreasing alpha')
                         self.band[:] = self.band_old
                         self.forces[:] = self.forces_old

                     # If action decreases, move to next creep step
@@ -318,20 +339,22 @@ def run_for(self, n_steps):
             # END creep while loop

             # After creep loop:
-            self.eta = self.eta * self.dEta
+            eta = eta * self.dEta
             resetCount = 0

+        np.save(self.ChainObj.name + '.npy', self.ChainObj.band)
+
     # Taken from the string method class
-    def refine_path(self, distances, band):
+    def refine_path(self, path_distances, band):
         """
        """
-        new_dist = np.linspace(distances[0], distances[-1], distances.shape[0])
+        new_dist = np.linspace(path_distances[0], path_distances[-1], path_distances.shape[0])
         # Restructure the string by interpolating every spin component
         # print(self.integrator.y[self.n_dofs_image:self.n_dofs_image + 10])
         bandrs = band.reshape(self.n_images, self.n_dofs_image)

         for i in range(self.n_dofs_image):
-            cs = si.CubicSpline(distances, bandrs[:, i])
+            cs = si.CubicSpline(path_distances, bandrs[:, i])
             bandrs[:, i] = cs(new_dist)

     # def set_options(self):
diff --git a/fidimag/common/hubert_minimiser.py b/fidimag/common/hubert_minimiser.py
index bda4f702..caa0a22c 100644
--- a/fidimag/common/hubert_minimiser.py
+++ b/fidimag/common/hubert_minimiser.py
@@ -264,6 +264,7 @@ def minimise(self,
                     exitFlag = True
                     break  # creep loop

+                # TODO: check if we need to use last spin state here
                 if self.totalE > self.totalE_last:  # If E increases, decrease eta so minimise slower
                     # print('Decreasing eta')
                     self.creepCount = 0
diff --git a/fidimag/common/nebm_FS.py b/fidimag/common/nebm_FS.py
index 212c0c53..3522dd40 100644
--- a/fidimag/common/nebm_FS.py
+++ b/fidimag/common/nebm_FS.py
@@ -113,7 +113,7 @@ def __init__(self, sim,
                  name='unnamed',
                  climbing_image=None,
                  openmp=False,
-                 integrator='sundials'  # or scipy
+                 # integrator='sundials'  # or scipy
                  ):

         super(NEBM_FS, self).__init__(sim,
@@ -127,7 +127,7 @@ def __init__(self, sim,
                                       )

         # We need the gradient norm to calculate the action
-        self.gradientENorm = np.zeros_like(self.n_images)
+        self.gradientENorm = np.zeros(self.n_images)

         # Initialisation ------------------------------------------------------
         # See the NEBMBase class for details
@@ -136,7 +136,7 @@ def __init__(self, sim,

         self.initialise_energies()

-        self.initialise_integrator(integrator=integrator)
+        self.initialise_integrator()

         self.create_tablewriter()

@@ -236,9 +236,9 @@ def compute_effective_field_and_energy(self, y):
         the relaxation function

         """
-        self.gradientE = self.gradientE.reshape(self.n_images, -1)
+        self.gradientE.shape = (self.n_images, -1)

-        y = y.reshape(self.n_images, -1)
+        y.shape = (self.n_images, -1)

         # Do not update the extreme images
         for i in range(1, len(y) - 1):
@@ -253,13 +253,17 @@ def compute_effective_field_and_energy(self, y):

             self.energies[i] = self.sim.compute_energy()

+        # TODO: move this calc to the action function
         # Compute the gradient norm per every image
-        Gnorms2 = np.sum(self.gradientE.reshape(-1, 3)**2, axis=1)
+        Gnorms2 = np.sum(self.gradientE**2, axis=1) / self.n_images
         # Compute the root mean square per image
-        self.gradientENorm[:] = np.sqrt(np.mean(Gnorms2.reshape(self.n_images, -1), axis=1))
+        self.gradientENorm[:] = np.sqrt(Gnorms2)

-        y = y.reshape(-1)
-        self.gradientE = self.gradientE.reshape(-1)
+        # DEBUG:
+        # print('gradEnorm', self.gradientENorm)
+
+        y.shape = (-1)
+        self.gradientE.shape = (-1)

     def compute_tangents(self, y):
         nebm_clib.compute_tangents(self.tangents, y, self.energies,
@@ -327,7 +331,10 @@ def compute_action(self):

         # TODO: we can use a better quadrature such as Gaussian
         # notice that the gradient norm here is using the RMS
-        action = spi.trapezoid(self.gradientENorm, self.distances)
+        action = spi.trapezoid(self.gradientENorm, self.path_distances)
+
+        # DEBUG:
+        # print('action from gradE', action)

         # The spring term in the action is added as |F_k|^2 / (2 * self.k) = self.k * x^2 / 2
         # (CHECK) This assumes the spring force is orthogonal to the force gradient (after projection)
@@ -341,10 +348,13 @@ def compute_action(self):
         dist_minus_norm = self.distances[:-1]
         # dY_plus_norm = distances[i];
         # dY_minus_norm = distances[i - 1];
-        springF2 = self.k * ((dist_plus_norm - dist_minus_norm)**2)
+        springF2 = self.k[1:-1] * ((dist_plus_norm - dist_minus_norm)**2)
         # CHECK: do we need to scale?
         action += np.sum(springF2) / (self.n_images - 2)

+        # DEBUG:
+        # print('action spring term', np.sum(springF2) / (self.n_images - 2))
+
         return action

     def compute_min_action(self):
diff --git a/tests/tes_oommf.py b/tests/test_oommf.py
similarity index 100%
rename from tests/tes_oommf.py
rename to tests/test_oommf.py
diff --git a/tests/test_two_particles_nebm-fs.py b/tests/test_two_particles_nebm-fs.py
new file mode 100644
index 00000000..7ddcb971
--- /dev/null
+++ b/tests/test_two_particles_nebm-fs.py
@@ -0,0 +1,111 @@
+import pytest
+
+# FIDIMAG:
+from fidimag.micro import Sim
+from fidimag.common import CuboidMesh
+from fidimag.micro import UniformExchange, UniaxialAnisotropy
+from fidimag.common.nebm_FS import NEBM_FS
+import numpy as np
+
+# Material Parameters
+A = 1e-12
+Kx = 1e5
+# Strong anisotropy
+Ms = 3.8e5
+
+
+"""
+We define two particles using a three-site mesh, leaving the
+middle site with Ms = 0
+
+"""
+
+
+def two_part(pos):
+
+    x = pos[0]
+
+    if x > 6 or x < 3:
+        return Ms
+    else:
+        return 0
+
+# Finite differences mesh
+mesh = CuboidMesh(nx=3, ny=1, nz=1, dx=3, dy=3, dz=3, unit_length=1e-9)
+
+
+# Simulation Function
+def relax_string(maxst, simname, init_im, interp, save_every=10000):
+    """
+    """
+
+    # Prepare simulation
+    # We define the two particles with the magnetisation function
+    sim = Sim(mesh)
+    sim.Ms = two_part
+
+    # sim.add(UniformExchange(A=A))
+
+    # Uniaxial anisotropy along x-axis
+    sim.add(UniaxialAnisotropy(Kx, axis=(1, 0, 0)))
+
+    # We want to check if the images in the last step are mostly equally spaced
+    # init_images = init_im
+
+    # Number of images to interpolate between each pair of consecutive
+    # initial states. The number of interpolations must always be equal to
+    # 'the number of initial states specified', minus one.
+    interpolations = interp
+
+    nebm = NEBM_FS(sim, init_im, interpolations=interpolations, name=simname)
+
+    # Limit the number of integrator steps for this test
+    nebm.integrator.maxSteps = 33
+    nebm.integrator.run_for(maxst,
+                            # save_vtks_every=save_every,
+                            # save_npys_every=save_every,
+                            )
+
+    return nebm
+
+
+def mid_m(pos):
+    if pos[0] > 4:
+        return (0.5, 0, 0.2)
+    else:
+        return (-0.5, 0, 0.2)
+
+
+def test_energy_barrier_2particles_string():
+    # Initial images: we interpolate by rotation between the two states
+    init_im = [(-1, 0, 0), (1, 0, 0)]
+    interp = [13]
+
+    barriers = []
+
+    # Define different ks for multiple simulations
+    # krange = ['1e8']
+
+    string = relax_string(10,
+                          'nebmfs_2particles_k1e8_10-10int',
+                          init_im,
+                          interp,
+                          save_every=5000,
+                          )
+
+    _file = np.loadtxt('nebmfs_2particles_k1e8_10-10int_energy.ndt')
+    barriers.append((np.max(_file[-1][1:]) - _file[-1][1]) / 1.602e-19)
+
+    print('Energy barrier is:', barriers[-1])
+    assert np.abs(barriers[-1] - 0.016019) < 1e-5
+
+    print(barriers)
+
+
+if __name__ == '__main__':
+    test_energy_barrier_2particles_string()
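For reference, a minimal standalone sketch of the quadrature that compute_action() in nebm_FS.py performs after this change: a trapezoidal integral of the per-image RMS energy-gradient norm over the cumulative path distances, plus the spring penalty averaged over the inner images. The names action_sketch, grad_E_rms, path_distances, image_separations and k are illustrative assumptions, not fidimag API.

import numpy as np
from scipy import integrate as spi


def action_sketch(grad_E_rms, path_distances, image_separations, k):
    # grad_E_rms, path_distances: one value per image (length n_images)
    # image_separations: distance between consecutive images (length n_images - 1)
    # k: spring constant per image (length n_images)
    n_images = grad_E_rms.shape[0]

    # Trapezoidal quadrature of the RMS gradient norm along the path
    action = spi.trapezoid(grad_E_rms, path_distances)

    # Spring term ~ k_i * (|d_i| - |d_{i-1}|)^2 for the inner images,
    # averaged over the (n_images - 2) inner images
    spring_f2 = k[1:-1] * (image_separations[1:] - image_separations[:-1])**2
    action += np.sum(spring_f2) / (n_images - 2)

    return action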
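Similarly, a self-contained sketch of the equidistant path refinement done by FSIntegrator.refine_path above: every degree of freedom is fitted with a cubic spline over the current cumulative distances and re-sampled on a uniform grid, which redistributes the images along the path. The names refine_sketch, band and path_distances are again only illustrative.

import numpy as np
from scipy import interpolate as si


def refine_sketch(band, path_distances, n_images, n_dofs_image):
    # band: flat array with n_images * n_dofs_image entries
    # path_distances: cumulative distance of every image (length n_images)
    new_dist = np.linspace(path_distances[0], path_distances[-1], n_images)

    band_rs = band.reshape(n_images, n_dofs_image)
    for i in range(n_dofs_image):
        # Interpolate this degree of freedom along the path and re-sample it
        # at the equally spaced distances
        cs = si.CubicSpline(path_distances, band_rs[:, i])
        band_rs[:, i] = cs(new_dist)

    return band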