// volume_calc.cpp
#include "openmc/volume_calc.h"

#include "openmc/capi.h"
#include "openmc/cell.h"
#include "openmc/constants.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/hdf5_interface.h"
#include "openmc/material.h"
#include "openmc/message_passing.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/random_lcg.h"
#include "openmc/settings.h"
#include "openmc/timer.h"
#include "openmc/xml_interface.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#include "xtensor/xadapt.hpp"
#include "xtensor/xview.hpp"

#include <algorithm>  // for copy
#include <cmath>      // for pow, sqrt
#include <sstream>
#include <unordered_set>

namespace openmc {

//==============================================================================
// Global variables
//==============================================================================

namespace model {
  std::vector<VolumeCalculation> volume_calcs;
}

//==============================================================================
// VolumeCalculation implementation
//==============================================================================
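
// For reference, the constructor below parses an XML node of roughly this
// shape. The child element names come directly from the get_node_* calls
// further down; the enclosing tag name is assumed here for illustration and
// is handled elsewhere in the settings parsing:
//
//   <volume_calc>
//     <domain_type>cell</domain_type>
//     <domain_ids>1 2 3</domain_ids>
//     <lower_left>-10 -10 -10</lower_left>
//     <upper_right>10 10 10</upper_right>
//     <samples>1000000</samples>
//   </volume_calc>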
VolumeCalculation::VolumeCalculation(pugi::xml_node node)
{
  // Read domain type (cell, material, or universe)
  std::string domain_type = get_node_value(node, "domain_type");
  if (domain_type == "cell") {
    domain_type_ = FILTER_CELL;
  } else if (domain_type == "material") {
    domain_type_ = FILTER_MATERIAL;
  } else if (domain_type == "universe") {
    domain_type_ = FILTER_UNIVERSE;
  } else {
    fatal_error(std::string("Unrecognized domain type for stochastic "
      "volume calculation: " + domain_type));
  }

  // Read domain IDs, bounding coordinates, and number of samples
  domain_ids_ = get_node_array<int>(node, "domain_ids");
  lower_left_ = get_node_array<double>(node, "lower_left");
  upper_right_ = get_node_array<double>(node, "upper_right");
  n_samples_ = std::stoi(get_node_value(node, "samples"));

  // Ensure there are no duplicates by copying elements to a set and then
  // comparing the length with the original vector
  std::unordered_set<int> unique_ids(domain_ids_.cbegin(), domain_ids_.cend());
  if (unique_ids.size() != domain_ids_.size()) {
    throw std::runtime_error{"Domain IDs for a volume calculation "
      "must be unique."};
  }
}

std::vector<VolumeCalculation::Result> VolumeCalculation::execute() const
{
  // Shared data that is collected from all threads
  int n = domain_ids_.size();
  std::vector<std::vector<int>> master_indices(n); // List of material indices for each domain
  std::vector<std::vector<int>> master_hits(n);    // Number of hits for each material in each domain

  // Divide work over MPI processes
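  // The first (n_samples_ % n_procs) ranks each take one extra sample, so the
  // half-open ranges [i_start, i_end) below tile [0, n_samples_) exactly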
  int min_samples = n_samples_ / mpi::n_procs;
  int remainder = n_samples_ % mpi::n_procs;
  int i_start, i_end;
  if (mpi::rank < remainder) {
    i_start = (min_samples + 1)*mpi::rank;
    i_end = i_start + min_samples + 1;
  } else {
    i_start = (min_samples + 1)*remainder + (mpi::rank - remainder)*min_samples;
    i_end = i_start + min_samples;
  }

  #pragma omp parallel
  {
    // Variables that are private to each thread
    std::vector<std::vector<int>> indices(n);
    std::vector<std::vector<int>> hits(n);
    Particle p;

    prn_set_stream(STREAM_VOLUME);

    // Sample locations and count hits
    #pragma omp for
    for (int i = i_start; i < i_end; i++) {
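      // Seed the random stream from the global sample index so the same points
      // are generated regardless of how samples are split across ranks/threads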
      set_particle_seed(i);
      p.n_coord_ = 1;
      Position xi {prn(), prn(), prn()};
      p.r() = lower_left_ + xi*(upper_right_ - lower_left_);
      p.u() = {0.5, 0.5, 0.5};

      // If this location is not in the geometry at all, move on to the next sample
      if (!find_cell(&p, false)) continue;
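      // For cell and universe domains, every level of the particle's coordinate
      // hierarchy is checked, so nested domains each receive the hit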
      if (domain_type_ == FILTER_MATERIAL) {
        if (p.material_ != MATERIAL_VOID) {
          for (int i_domain = 0; i_domain < n; i_domain++) {
            if (model::materials[p.material_]->id_ == domain_ids_[i_domain]) {
              this->check_hit(p.material_, indices[i_domain], hits[i_domain]);
              break;
            }
          }
        }
      } else if (domain_type_ == FILTER_CELL) {
        for (int level = 0; level < p.n_coord_; ++level) {
          for (int i_domain = 0; i_domain < n; i_domain++) {
            if (model::cells[p.coord_[level].cell]->id_ == domain_ids_[i_domain]) {
              this->check_hit(p.material_, indices[i_domain], hits[i_domain]);
              break;
            }
          }
        }
      } else if (domain_type_ == FILTER_UNIVERSE) {
        for (int level = 0; level < p.n_coord_; ++level) {
          for (int i_domain = 0; i_domain < n; ++i_domain) {
            if (model::universes[p.coord_[level].universe]->id_ == domain_ids_[i_domain]) {
              check_hit(p.material_, indices[i_domain], hits[i_domain]);
              break;
            }
          }
        }
      }
    }

    // At this point, each thread has its own pair of index/hits lists and we
    // now need to reduce them. OpenMP is not nearly smart enough to do this on
    // its own, so we have to manually reduce them
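    // With schedule(static) over exactly omp_get_num_threads() iterations, each
    // thread gets one iteration, and the ordered region serializes the merges
    // into master_indices/master_hits without needing a critical section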
#ifdef _OPENMP
    #pragma omp for ordered schedule(static)
    for (int i = 0; i < omp_get_num_threads(); ++i) {
      #pragma omp ordered
      for (int i_domain = 0; i_domain < n; ++i_domain) {
        for (int j = 0; j < indices[i_domain].size(); ++j) {
          // Check if this material has been added to the master list and if
          // so, accumulate the number of hits
          bool already_added = false;
          for (int k = 0; k < master_indices[i_domain].size(); k++) {
            if (indices[i_domain][j] == master_indices[i_domain][k]) {
              master_hits[i_domain][k] += hits[i_domain][j];
              already_added = true;
            }
          }
          if (!already_added) {
            // If we made it here, the material hasn't yet been added to the
            // master list, so add entries to the master indices and master
            // hits lists
            master_indices[i_domain].push_back(indices[i_domain][j]);
            master_hits[i_domain].push_back(hits[i_domain][j]);
          }
        }
      }
    }
#else
    master_indices = indices;
    master_hits = hits;
#endif

    prn_set_stream(STREAM_TRACKING);
  } // omp parallel

  // Reduce hits onto master process

  // Determine volume of bounding box
  Position d {upper_right_ - lower_left_};
  double volume_sample = d.x*d.y*d.z;

  // Set size for members of the Result struct
  std::vector<Result> results(n);

  for (int i_domain = 0; i_domain < n; ++i_domain) {
    // Get reference to result for this domain
    auto& result {results[i_domain]};

    // Create 2D array to store atoms/uncertainty for each nuclide. Later this
    // is compressed into vectors storing only those nuclides that are non-zero
    auto n_nuc = data::nuclides.size();
    xt::xtensor<double, 2> atoms({n_nuc, 2}, 0.0);

#ifdef OPENMC_MPI
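    // Each non-master rank packs its per-domain (material index, hit count)
    // pairs into a flat buffer [idx0, hits0, idx1, hits1, ...] and sends it to
    // the master, which folds the counts into its own lists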
    if (mpi::master) {
      for (int j = 1; j < mpi::n_procs; j++) {
        int q;
        MPI_Recv(&q, 1, MPI_INTEGER, j, 0, mpi::intracomm, MPI_STATUS_IGNORE);

        int buffer[2*q];
        MPI_Recv(&buffer[0], 2*q, MPI_INTEGER, j, 1, mpi::intracomm, MPI_STATUS_IGNORE);

        for (int k = 0; k < q; ++k) {
          for (int m = 0; m < master_indices[i_domain].size(); ++m) {
            if (buffer[2*k] == master_indices[i_domain][m]) {
              master_hits[i_domain][m] += buffer[2*k + 1];
              break;
            }
          }
        }
      }
    } else {
      int q = master_indices[i_domain].size();
      int buffer[2*q];
      for (int k = 0; k < q; ++k) {
        buffer[2*k] = master_indices[i_domain][k];
        buffer[2*k + 1] = master_hits[i_domain][k];
      }
      MPI_Send(&q, 1, MPI_INTEGER, 0, 0, mpi::intracomm);
      MPI_Send(&buffer[0], 2*q, MPI_INTEGER, 0, 1, mpi::intracomm);
    }
#endif

    if (mpi::master) {
      int total_hits = 0;
      for (int j = 0; j < master_indices[i_domain].size(); ++j) {
        total_hits += master_hits[i_domain][j];
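        // f is the fraction of all samples that hit this material within the
        // domain; f*(1 - f)/n_samples_ is the binomial estimate of its variance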
        double f = static_cast<double>(master_hits[i_domain][j]) / n_samples_;
        double var_f = f*(1.0 - f)/n_samples_;

        int i_material = master_indices[i_domain][j];
        if (i_material == MATERIAL_VOID) continue;

        const auto& mat = model::materials[i_material];
        for (int k = 0; k < mat->nuclide_.size(); ++k) {
          // Accumulate nuclide density
          int i_nuclide = mat->nuclide_[k];
          atoms(i_nuclide, 0) += mat->atom_density_[k] * f;
          atoms(i_nuclide, 1) += std::pow(mat->atom_density_[k], 2) * var_f;
        }
      }

      // Determine volume
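      // Mean volume is (hit fraction) * (bounding-box volume); the second entry
      // is one standard deviation, algebraically equal to
      // volume_sample * sqrt(f*(1 - f)/n_samples_) for the total hit fraction f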
      result.volume[0] = static_cast<double>(total_hits) / n_samples_ * volume_sample;
      result.volume[1] = std::sqrt(result.volume[0]
        * (volume_sample - result.volume[0]) / n_samples_);

      for (int j = 0; j < n_nuc; ++j) {
        // Determine total number of atoms. At this point, we have values in
        // atoms/b-cm. To get to atoms we multiply by 10^24 V.
        double mean = 1.0e24 * volume_sample * atoms(j, 0);
        double stdev = 1.0e24 * volume_sample * std::sqrt(atoms(j, 1));

        // Convert full arrays to vectors
        if (mean > 0.0) {
          result.nuclides.push_back(j);
          result.atoms.push_back(mean);
          result.uncertainty.push_back(stdev);
        }
      }
    }
  }

  return results;
}

void VolumeCalculation::to_hdf5(const std::string& filename,
  const std::vector<Result>& results) const
{
  // Create HDF5 file
  hid_t file_id = file_open(filename, 'w');

  // Write header info
  write_attribute(file_id, "filetype", "volume");
  write_attribute(file_id, "version", VERSION_VOLUME);
  write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
  write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

  // Write current date and time
  write_attribute(file_id, "date_and_time", time_stamp());

  // Write basic metadata
  write_attribute(file_id, "samples", n_samples_);
  write_attribute(file_id, "lower_left", lower_left_);
  write_attribute(file_id, "upper_right", upper_right_);
  if (domain_type_ == FILTER_CELL) {
    write_attribute(file_id, "domain_type", "cell");
  } else if (domain_type_ == FILTER_MATERIAL) {
    write_attribute(file_id, "domain_type", "material");
  } else if (domain_type_ == FILTER_UNIVERSE) {
    write_attribute(file_id, "domain_type", "universe");
  }

  for (int i = 0; i < domain_ids_.size(); ++i) {
    hid_t group_id = create_group(file_id, "domain_"
      + std::to_string(domain_ids_[i]));

    // Write volume for domain
    const auto& result {results[i]};
    write_dataset(group_id, "volume", result.volume);

    // Create array of nuclide names from the vector
    auto n_nuc = result.nuclides.size();
    if (!result.nuclides.empty()) {
      std::vector<std::string> nucnames;
      for (int i_nuc : result.nuclides) {
        nucnames.push_back(data::nuclides[i_nuc]->name_);
      }

      // Create array of total # of atoms with uncertainty for each nuclide
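      // (column 0 holds the mean number of atoms, column 1 its standard deviation)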
      xt::xtensor<double, 2> atom_data({n_nuc, 2});
      xt::view(atom_data, xt::all(), 0) = xt::adapt(result.atoms);
      xt::view(atom_data, xt::all(), 1) = xt::adapt(result.uncertainty);

      // Write results
      write_dataset(group_id, "nuclides", nucnames);
      write_dataset(group_id, "atoms", atom_data);
    }

    close_group(group_id);
  }

  file_close(file_id);
}
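
// Record a hit for material i_material in the per-domain (indices, hits) lists,
// appending a new entry the first time a given material is seen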
void VolumeCalculation::check_hit(int i_material, std::vector<int>& indices,
  std::vector<int>& hits) const
{
  // Check if this material was previously hit and if so, increment count
  bool already_hit = false;
  for (int j = 0; j < indices.size(); j++) {
    if (indices[j] == i_material) {
      hits[j]++;
      already_hit = true;
    }
  }

  // If the material was not previously hit, append an entry to the material
  // indices and hits lists
  if (!already_hit) {
    indices.push_back(i_material);
    hits.push_back(1);
  }
}

void free_memory_volume()
{
  openmc::model::volume_calcs.clear();
}

} // namespace openmc

//==============================================================================
// OPENMC_CALCULATE_VOLUMES runs each of the stochastic volume calculations
// that the user has specified and writes results to HDF5 files
//==============================================================================

int openmc_calculate_volumes() {
  using namespace openmc;

  if (mpi::master) {
    header("STOCHASTIC VOLUME CALCULATION", 3);
  }
  Timer time_volume;
  time_volume.start();

  for (int i = 0; i < model::volume_calcs.size(); ++i) {
    if (mpi::master) {
      write_message("Running volume calculation " + std::to_string(i+1) + "...", 4);
    }

    // Run volume calculation
    const auto& vol_calc {model::volume_calcs[i]};
    auto results = vol_calc.execute();

    if (mpi::master) {
      std::string domain_type;
      if (vol_calc.domain_type_ == FILTER_CELL) {
        domain_type = " Cell ";
      } else if (vol_calc.domain_type_ == FILTER_MATERIAL) {
        domain_type = " Material ";
      } else {
        domain_type = " Universe ";
      }

      // Display domain volumes
      for (int j = 0; j < vol_calc.domain_ids_.size(); j++) {
        std::stringstream msg;
        msg << domain_type << vol_calc.domain_ids_[j] << ": " <<
          results[j].volume[0] << " +/- " << results[j].volume[1] << " cm^3";
        write_message(msg, 4);
      }

      // Write volumes to HDF5 file
      std::string filename = settings::path_output + "volume_"
        + std::to_string(i+1) + ".h5";
      vol_calc.to_hdf5(filename, results);
    }
  }

  // Show elapsed time
  time_volume.stop();
  if (mpi::master) {
    write_message("Elapsed time: " + std::to_string(time_volume.elapsed())
      + " s", 6);
  }

  return 0;
}