//------------------------------------------------------------------------------
//
// This file is part of the VAMPIRE open source package under the
// Free BSD licence (see licence file for details).
//
// (c) Richard F L Evans 2018. All rights reserved.
//
// Email: [email protected]
//
//------------------------------------------------------------------------------
//
// Standard Headers
#include <iostream>
#include <fstream>
// Vampire headers
#include "errors.hpp"
#include "atoms.hpp"
#include "cells.hpp"
#include "create.hpp"
#include "dipole.hpp"
#include "grains.hpp"
#include "ltmp.hpp"
#include "material.hpp"
#include "neighbours.hpp"
#include "sim.hpp"
#include "spintorque.hpp"
#include "unitcell.hpp"
#include "vio.hpp"
#include "vmath.hpp"
#include "vmpi.hpp"
// Internal create header
#include "internal.hpp"
namespace cs{
// System Dimensions
double system_dimensions[3]={77.0,77.0,77.0}; /// Size of system (A)
bool pbc[3]={false,false,false}; /// Periodic boundary conditions
bool SelectMaterialByGeometry=false; /// Toggle override of input material type by geometry
unsigned int total_num_unit_cells[3]={0,0,0}; /// Unit cells for entire system (x,y,z)
unsigned int local_num_unit_cells[3]={0,0,0}; /// Unit cells on local processor (x,y,z)
// System Parameters
int particle_creation_parity=0; /// Offset of particle centre (odd/even)
double particle_scale=50.0; /// Diameter of particles/grains (A)
double particle_spacing=10.0; /// Spacing between particles (A)
double particle_array_offset_x=0.0; /// Offset of particle array along x-direction
double particle_array_offset_y=0.0; /// Offset of particle array along y-direction
double particle_shape_factor_x=1.0; /// Normalised particle shape
double particle_shape_factor_y=1.0; /// Normalised particle shape
double particle_shape_factor_z=1.0; /// Normalised particle shape
// Other directives and flags
bool single_spin=false;
int system_creation_flags[10]={0,0,0,0,0,0,0,0,0,0};
bool fill_core_shell=true;
bool core_shell_particles = false;
// Variables for multilayer system
bool multilayers = false;
bool multilayer_height_category = false; // enable height categorization by multilayer number
int num_multilayers = 1;
// Variables for interfacial roughness control
bool interfacial_roughness=false;
bool interfacial_roughness_local_height_field=false;
int interfacial_roughness_type=0; /// Sets peaks (1), troughs (-1) or both (0)
unsigned int interfacial_roughness_random_seed=23456;
unsigned int interfacial_roughness_seed_count=20; /// Number of seeds
double interfacial_roughness_height_field_resolution=3.5; /// Angstroms
double interfacial_roughness_mean_seed_radius=30.0; /// Angstroms
double interfacial_roughness_seed_radius_variance=0.5; /// Variance as fraction of mean radius
double interfacial_roughness_mean_seed_height=3.0; /// Angstroms
double interfacial_roughness_seed_height_max=1.8; /// Angstroms
// unit cell container
unitcell::unit_cell_t unit_cell;
// Array for storing non-magnetic atoms
std::vector<nm_atom_t> non_magnetic_atoms_array;
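//------------------------------------------------------------------------------
// Function to create the atomistic system: builds the crystal, cuts the
// requested geometry, generates neighbour lists and initialises the
// dependent modules (cells, spin torque, local temperature pulse).
//------------------------------------------------------------------------------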
int create(){
//----------------------------------------------------------
// check calling of routine if error checking is activated
//----------------------------------------------------------
if(err::check==true){std::cout << "cs::create has been called" << std::endl;}
if(vmpi::my_rank==0){
std::cout << "Creating system" << std::endl;
}
//=============================================================
// System creation variables
//=============================================================
// initialise create module parameters
create::initialize();
// Atom creation array
std::vector<cs::catom_t> catom_array;
// initialise unit cell for system
uc::initialise(cs::unit_cell);
// Instantiate some constants for improved readability
const double ucx = unit_cell.dimensions[0];
const double ucy = unit_cell.dimensions[1];
const double ucz = unit_cell.dimensions[2];
const unsigned int na = unit_cell.atom.size();
// Calculate number of global and local unit cells required (rounding up)
// Must be set before rounding up system dimensions for periodic boundary conditions
cs::total_num_unit_cells[0]=int(vmath::iceil(cs::system_dimensions[0]/unit_cell.dimensions[0]));
cs::total_num_unit_cells[1]=int(vmath::iceil(cs::system_dimensions[1]/unit_cell.dimensions[1]));
cs::total_num_unit_cells[2]=int(vmath::iceil(cs::system_dimensions[2]/unit_cell.dimensions[2]));
// check for pbc and if so round up system dimensions
if(cs::pbc[0]==true) cs::system_dimensions[0]=unit_cell.dimensions[0]*(int(vmath::iceil(cs::system_dimensions[0]/unit_cell.dimensions[0])));
if(cs::pbc[1]==true) cs::system_dimensions[1]=unit_cell.dimensions[1]*(int(vmath::iceil(cs::system_dimensions[1]/unit_cell.dimensions[1])));
if(cs::pbc[2]==true) cs::system_dimensions[2]=unit_cell.dimensions[2]*(int(vmath::iceil(cs::system_dimensions[2]/unit_cell.dimensions[2])));
// Set up Parallel Decomposition if required
#ifdef MPICF
if(vmpi::mpi_mode==0) vmpi::geometric_decomposition(vmpi::num_processors,cs::system_dimensions);
#endif
// Create block of crystal of desired size
cs::create_crystal_structure(catom_array);
// Cut system to the correct type, species etc
create::create_system_type(catom_array);
// Copy atoms for interprocessor communications
#ifdef MPICF
if(vmpi::mpi_mode==0){
create::internal::copy_halo_atoms(catom_array);
}
#endif
//---------------------------------------------
// Create Neighbour lists for system
//---------------------------------------------
neighbours::list_t bilinear; // bilinear exchange list
neighbours::list_t biquadratic; // biquadratic exchange list
// generate bilinear exchange list
bilinear.generate(catom_array, cs::unit_cell.bilinear, na, ucx, ucy, ucz);
// optionally create a biquadratic neighbour list
if(exchange::biquadratic){
biquadratic.generate(catom_array, cs::unit_cell.biquadratic, na, ucx, ucy, ucz);
}
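// Identify boundary/halo atoms and sort them for interprocessor communication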
#ifdef MPICF
create::internal::identify_mpi_boundary_atoms(catom_array,bilinear);
if(exchange::biquadratic) create::internal::identify_mpi_boundary_atoms(catom_array,biquadratic);
create::internal::mark_non_interacting_halo(catom_array);
// Sort Arrays by MPI Type
create::internal::sort_atoms_by_mpi_type(catom_array, bilinear, biquadratic);
#endif
#ifdef MPICF
// ** Must be done in parallel **
create::internal::init_mpi_comms(catom_array);
vmpi::barrier();
#endif
// Print informative message
std::cout << "Copying system data to optimised data structures." << std::endl;
zlog << zTs() << "Copying system data to optimised data structures." << std::endl;
create::internal::set_atom_vars(catom_array, bilinear, biquadratic);
// Determine number of local atoms
#ifdef MPICF
int num_local_atoms = vmpi::num_core_atoms+vmpi::num_bdry_atoms;
#else
int num_local_atoms = atoms::num_atoms;
// set number of core atoms for serial code (to allow wrapper functions to work seamlessly)
vmpi::num_core_atoms = atoms::num_atoms;
#endif
// Set grain and cell variables for simulation
grains::set_properties();
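// Initialise the cell discretisation and assign atoms to cells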
cells::initialize(cs::system_dimensions[0],
                  cs::system_dimensions[1],
                  cs::system_dimensions[2],
                  cs::unit_cell.dimensions[0],
                  cs::unit_cell.dimensions[1],
                  cs::unit_cell.dimensions[2],
                  atoms::x_coord_array,
                  atoms::y_coord_array,
                  atoms::z_coord_array,
                  atoms::type_array,
                  atoms::cell_array,
                  create::num_total_atoms_non_filler,
                  atoms::num_atoms);
//----------------------------------------
// Initialise spin torque data
//----------------------------------------
st::initialise(cs::system_dimensions[0],
               cs::system_dimensions[1],
               cs::system_dimensions[2],
               atoms::x_coord_array,
               atoms::y_coord_array,
               atoms::z_coord_array,
               atoms::type_array,
               num_local_atoms);
//----------------------------------------
// Initialise local temperature data
//----------------------------------------
ltmp::initialise(cs::system_dimensions[0],
                 cs::system_dimensions[1],
                 cs::system_dimensions[2],
                 atoms::x_coord_array,
                 atoms::y_coord_array,
                 atoms::z_coord_array,
                 atoms::type_array,
                 num_local_atoms,
                 sim::Teq,
                 sim::pump_power,
                 sim::pump_time,
                 sim::TTG,
                 sim::TTCe,
                 sim::TTCl,
                 mp::dt_SI,
                 sim::Tmin,
                 sim::Tmax);
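// Report the total number of atoms generated (summed over all processors for parallel runs)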
//std::cout << num_atoms << std::endl;
#ifdef MPICF
//std::cout << "Outputting coordinate data" << std::endl;
//vmpi::crystal_xyz(catom_array);
int my_num_atoms=vmpi::num_core_atoms+vmpi::num_bdry_atoms;
//std::cout << "my_num_atoms == " << my_num_atoms << std::endl;
int total_num_atoms=0;
MPI_Reduce(&my_num_atoms,&total_num_atoms, 1,MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
std::cout << "Total number of atoms (all CPUs): " << total_num_atoms << std::endl;
zlog << zTs() << "Total number of atoms (all CPUs): " << total_num_atoms << std::endl;
#else
std::cout << "Number of atoms generated: " << atoms::num_atoms << std::endl;
zlog << zTs() << "Number of atoms generated: " << atoms::num_atoms << std::endl;
#endif
return EXIT_SUCCESS;
}
} // end of cs namespace