@@ -45,14 +45,106 @@ int main(int argc, char *argv[]) {
             std::string input_file = argv[1];
             // Read input file
             Inputs inputs(id, input_file);
+            std::string simulation_type = inputs.simulation_type;
 
             // Setup local and global grids, decomposing domain (needed to construct temperature)
             Grid grid(inputs.simulation_type, id, np, inputs.domain.number_of_layers, inputs.domain, inputs.substrate,
                       inputs.temperature);
             // Temperature fields characterized by data in this structure
             Temperature<memory_space> temperature(grid, inputs.temperature, inputs.print.store_solidification_start);
 
-            runExaCA(id, np, inputs, timers, grid, temperature);
+            // Material response function
+            InterfacialResponseFunction irf(inputs.domain.deltat, grid.deltax, inputs.irf);
+
+            // Read temperature data if necessary
+            if (simulation_type == "FromFile")
+                temperature.readTemperatureData(id, grid, 0);
+            // Initialize the temperature fields for the simulation type of interest
+            if ((simulation_type == "Directional") || (simulation_type == "SingleGrain"))
+                temperature.initialize(id, simulation_type, grid, inputs.domain.deltat);
+            else if (simulation_type == "Spot")
+                temperature.initialize(id, grid, irf.freezingRange(), inputs.domain.deltat, inputs.domain.spot_radius);
+            else if ((simulation_type == "FromFile") || (simulation_type == "FromFinch"))
+                temperature.initialize(0, id, grid, irf.freezingRange(), inputs.domain.deltat, simulation_type);
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            // Initialize grain orientations
+            Orientation<memory_space> orientation(id, inputs.grain_orientation_file, false);
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            // Initialize cell types, grain IDs, and layer IDs
+            CellData<memory_space> celldata(grid, inputs.substrate, inputs.print.store_melt_pool_edge);
+            if (simulation_type == "Directional")
+                celldata.initSubstrate(id, grid, inputs.rng_seed);
+            else if (simulation_type == "SingleGrain")
+                celldata.initSubstrate(id, grid);
+            else
+                celldata.initSubstrate(id, grid, inputs.rng_seed, temperature.number_of_solidification_events);
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            // Variables characterizing the active cell region within each rank's grid, including buffers for ghost node
+            // data (fixed size) and the steering vector/steering vector size on host/device
+            Interface<memory_space> interface(id, grid.domain_size, inputs.substrate.init_oct_size);
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            // Nucleation data structure, containing views of nuclei locations, time steps, and ids, and nucleation
+            // event counters - initialized with an estimate of the number of nuclei in the layer. Without knowing
+            // estimated_nuclei_this_rank_this_layer yet, initialize nucleation data structures to estimated sizes and
+            // resize inside of placeNuclei when the number of nuclei per rank is known
+            int estimated_nuclei_this_rank_this_layer =
+                inputs.nucleation.n_max * pow(grid.deltax, 3) * grid.domain_size;
+            Nucleation<memory_space> nucleation(estimated_nuclei_this_rank_this_layer, inputs.nucleation);
+            // Fill in nucleation data structures, and assign nucleation undercooling values to potential nucleation
+            // events. Potential nucleation grains are only associated with liquid cells in layer 0 - they will be
+            // initialized for each successive layer when layer 0 is complete
+            nucleation.placeNuclei(temperature, inputs.rng_seed, 0, grid, id);
+
+            // Initialize printing struct from inputs
+            Print print(grid, np, inputs.print);
+
+            // End of initialization
+            timers.stopInit();
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            int cycle = 0;
+            timers.startRun();
+
+            // Run ExaCA to model solidification of each layer
+            for (int layernumber = 0; layernumber < grid.number_of_layers; layernumber++) {
+                timers.startLayer();
+                runExaCALayer(id, np, layernumber, cycle, inputs, timers, grid, temperature, irf, orientation, celldata,
+                              interface, nucleation, print, simulation_type);
+
+                if (layernumber != grid.number_of_layers - 1) {
+                    // Initialize new temperature field data for layer "layernumber + 1"
+                    // TODO: reorganize these temperature function calls into a temperature.init_next_layer as done
+                    // with the substrate. If the next layer's temperature data isn't already stored, it should be read
+                    if ((simulation_type == "FromFile") && (inputs.temperature.layerwise_temp_read))
+                        temperature.readTemperatureData(id, grid, layernumber + 1);
+                    MPI_Barrier(MPI_COMM_WORLD);
+                    // Initialize next layer's temperature data
+                    temperature.initialize(layernumber + 1, id, grid, irf.freezingRange(), inputs.domain.deltat,
+                                           simulation_type);
+                    // Reset solidification event counter of all cells to zeros for the next layer, resizing to number
+                    // of cells associated with the next layer, and get the subview for undercooling
+                    temperature.resetLayerEventsUndercooling(grid);
+
+                    // Initialize next layer of the simulation
+                    initExaCALayer(id, np, layernumber, cycle, inputs, grid, temperature, orientation, celldata,
+                                   interface, nucleation, print, simulation_type);
+                    timers.stopLayer(layernumber);
+                }
+                else {
+                    MPI_Barrier(MPI_COMM_WORLD);
+                    timers.stopLayer();
+                }
+            }
+            timers.stopRun();
+            MPI_Barrier(MPI_COMM_WORLD);
+
+            // Print ExaCA end-of-run data
+            finalizeExaCA(id, np, cycle, inputs, timers, grid, temperature, irf, orientation, celldata, interface,
+                          nucleation, print, simulation_type);
         }
     }
     // Finalize Kokkos
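
For reference, here is a minimal standalone sketch of the estimated_nuclei_this_rank_this_layer arithmetic introduced in the diff, assuming placeholder values for n_max, deltax, and domain_size (none taken from an actual ExaCA input file): the expected number of potential nuclei on a rank is the volumetric nucleation density [nuclei/m^3] times the cell volume deltax^3 [m^3] times the number of cells the rank owns.

#include <cmath>
#include <cstdio>

int main() {
    // Placeholder values for illustration only, not from an ExaCA input file
    const double n_max = 1.0e13;     // volumetric nucleation density, nuclei/m^3
    const double deltax = 2.5e-6;    // CA cell size, m
    const int domain_size = 1000000; // cells owned by this rank

    // Same expression as in the diff: density * cell volume * cell count,
    // truncated to int on assignment
    const int estimated_nuclei_this_rank_this_layer =
        n_max * std::pow(deltax, 3) * domain_size;
    std::printf("Estimated nuclei this rank, this layer: %d\n",
                estimated_nuclei_this_rank_this_layer);
    return 0;
}

With these placeholder numbers the estimate is 1e13 * (2.5e-6)^3 * 1e6 = 156.25, truncated to 156. This is only a rough sizing, which is why the Nucleation views are allocated from the estimate and then resized inside placeNuclei once the actual per-rank nucleus count is known.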