5 #include <ObjexxFCL/FArray2D.hh>
8 #include <basic/Tracer.hh>
12 #include <ObjexxFCL/format.hh>
14 #include <basic/prof.hh>
15 #include <utility/io/mpistream.hh>
24 #include <basic/options/option.hh>
25 #include <basic/options/after_opts.hh>
26 #include <basic/options/option_macros.hh>
30 #include <basic/prof.hh>
34 namespace canonical_sampling{
35 namespace mc_convergence_checks {
37 static basic::Tracer
tr(
"MPIPool_ConvergenceCheck");
44 using namespace ObjexxFCL;
46 using namespace utility::io::mpi_stream;
47 using namespace core::io::silent;
// MPI message tags for the pool-synchronization protocol in this file.
// Each constant appears as the tag argument of matching MPI_Send/MPI_Recv
// pairs below; the 1000-series values are also exchanged as "message_type"
// payloads on the MPI_UPDATE control channel.
49 int const POSE_TRANSFER = 2000;   // 4-int size header preceding a pose transfer
50 int const TAG_TRANSFER = 2001;    // decoy tag string
51 int const MPI_UPDATE = 1000;      // control channel carrying a message_type int
52 int const MPI_ADD_POSE_TO_POOL = 1001;  // message_type: new structure offered to the pool
53 int const MPI_DATA_TRANSFER = 1003;     // serialized coordinate matrix (char buffer)
54 int const MPI_BOOL = 1006;        // accepted/rejected flag sent as int 0/1
55 int const UPDATE_MISSING_POSE = 1004;   // message_type handled in the master loop (pool catch-up)
56 int const MPI_REPORT_SIZE = 1007; // pool-size exchange (master/slave size diff)
57 int const MPI_FINISHED = 1008;    // message_type: a trajectory has finished
// Shorthand for a proxy 2-D coordinate array (ObjexxFCL).
62 typedef FArray2P<double> FArray2P_double;
// NOTE(review): fragment of a constructor initializer list + body; the
// signature and several initializers fall outside this excerpt (embedded
// numbering skips 70-72 and 75-76) — do not assume this list is complete.
68 Pool_RMSD( silent_file ),
69 trajectories_finished_( 0 ),
// -1 appears to act as an "unset" sentinel for the threshold — TODO confirm.
73 transition_threshold_(-1),
74 new_decoys_out_(
"discovered_decoys.out" )
// Debug: report how many structures this rank's pool starts with.
77 tr.Debug <<
"checking: rank " <<
rank_ <<
" has " <<
Pool_RMSD::size() <<
" structures in its pool " << std::endl;
105 PROF_START( basic::CHECK_COMM_SIZE );
106 MPI_Comm_rank( MPI_COMM_WORLD, (
int* )( &
rank_ ) );
107 MPI_Comm_size( MPI_COMM_WORLD, (
int* )( &
npes_ ) );
108 PROF_STOP( basic::CHECK_COMM_SIZE );
// Debug: master-side pool initialization reporting the current pool size.
// NOTE(review): surrounding function body is outside this excerpt.
114 tr.Debug <<
"initializing master pool. setting pool_size_ " <<
Pool_RMSD::size() << std::endl;
// --- send_update fragment: push a message_type int to receiving_rank on the
// MPI_UPDATE control channel.
128 PROF_START( basic::MPI_SEND_UPDATE );
129 tr.Debug <<
rank_ <<
" is sending an update message of type " << message_type <<
" to " << receiving_rank << std::endl;
130 MPI_Send( &message_type, 1, MPI_INT, receiving_rank, MPI_UPDATE, MPI_COMM_WORLD );
131 PROF_STOP( basic::MPI_SEND_UPDATE );
// --- receive_update fragment: blocking receive from any rank; the actual
// sender is recovered from the status object.
136 PROF_START( basic::MPI_SEND_UPDATE );
138 MPI_Recv( &message_type, 1, MPI_INT,
MPI_ANY_SOURCE, MPI_UPDATE, MPI_COMM_WORLD, &stat );
139 sending_rank = stat.MPI_SOURCE;
140 tr.Debug <<
"from " << sending_rank <<
" received an update message_type: " << message_type << std::endl;
141 PROF_STOP( basic::MPI_SEND_UPDATE );
// --- send loop fragment: stream the newest (current_size - num_to_send .. )
// pool entries to receiving_rank one pose at a time via send_xyz.
// NOTE(review): the loop body that decrements num_to_send and fetches
// coords/tag is outside this excerpt.
149 while( num_to_send > 0 ){
153 tr.Debug <<
"Sending " << tag <<
" from rank " <<
rank_ <<
154 " at index " << (current_size - num_to_send + 1) <<
155 " current size is " << current_size <<
156 " num to send is " << num_to_send <<
157 " to receiving_rank " << receiving_rank << std::endl;
158 send_xyz( coords, tag, receiving_rank );
// --- send_xyz fragment: ship one pose (size header, serialized coords, tag)
// to receiving_rank. NOTE(review): the signature, buf declaration, and the
// opening of the coordinate MPI_Send are outside this excerpt.
164 FArray2D<double>&
xyz,
// NOTE(review): coords already holds the serialized string (str(coords) just
// copies it into the stream), and every double_to_string.str() call below
// copies the whole buffer again — coords could be used directly. Left as-is
// because the full statement context is not visible here.
170 std::ostringstream double_to_string;
171 double_to_string.str( coords );
173 PROF_START( basic::MPI_SLAVE_REPORT_SIZES );
// buf[2]/buf[3]: byte lengths of the serialized coords and the tag string,
// consumed by the matching MPI_Recv of the 4-int header (POSE_TRANSFER).
177 buf[ 2 ] = (double_to_string.str()).
size();
178 buf[ 3 ] = tag.size();
180 MPI_Send( &buf, 4, MPI_INT, receiving_rank, POSE_TRANSFER, MPI_COMM_WORLD );
181 PROF_STOP( basic::MPI_SLAVE_REPORT_SIZES );
182 PROF_START( basic::MPI_SLAVE_REPORT_NEW_COORDS );
// const_cast: pre-MPI-3 MPI_Send takes a non-const void* buffer.
184 const_cast<char*> (double_to_string.str().data()),
185 (double_to_string.str()).
size(),
191 tr.Debug << receiving_rank <<
" sending tag " << tag << std::endl;
192 MPI_Send( const_cast<char*> (tag.data()), tag.size(), MPI_CHAR, receiving_rank, TAG_TRANSFER, MPI_COMM_WORLD );
193 PROF_STOP( basic::MPI_SLAVE_REPORT_NEW_COORDS );
// --- farray_to_string fragment: serialize xyz to whitespace-separated text,
// iterating i over 1..u1() then j over 1..u2().
// NOTE(review): operator<< uses default stream precision (6 significant
// digits), so this round-trip is lossy for double coordinates — confirm
// whether that is acceptable here. Also note the signed/unsigned comparison
// (unsigned i vs. u1(), which ObjexxFCL declares as int).
197 FArray2D<double>& xyz,
200 PROF_START( basic::FARRAY_MANIPULATION );
201 std::ostringstream double_to_string;
202 for(
unsigned i = 1; i <= xyz.u1(); i++ ){
203 for(
unsigned j = 1; j <= xyz.u2(); j++ ){
204 double_to_string <<
xyz( i, j ) <<
" ";
207 string = double_to_string.str();
208 PROF_STOP( basic::FARRAY_MANIPULATION );
// --- receive_newest_xyz fragment: pull num_to_get poses, adding each to the
// local pool (loop body outside this excerpt).
212 while( num_to_get > 0 ){
218 tr.Debug <<
"added pose (receive_newest_xyz) now has " <<
Pool_RMSD::size() << std::endl;
// --- receive_xyz fragment: receive the 4-int size header sent by send_xyz
// (matching POSE_TRANSFER tag), then size the coordinate array.
// NOTE(review): buf declaration and the buf[0]/buf[1]/buf[3] unpacking are
// outside this excerpt; buf[2] is the serialized-coords byte length.
223 FArray2D<double>& xyz,
228 PROF_START( basic::MPI_SLAVE_REPORT_SIZES );
231 MPI_Recv( &buf, 4, MPI_INT, sending_rank, POSE_TRANSFER, MPI_COMM_WORLD, &stat );
238 received_string_size = buf[ 2 ];
240 PROF_STOP( basic::MPI_SLAVE_REPORT_SIZES );
242 xyz.dimension( xyz_u1, xyz_u2, 0.0 );
244 PROF_START( basic::MPI_SLAVE_REPORT_NEW_COORDS );
245 char *
matrix =
new char[ received_string_size + 1 ];
247 MPI_Recv( matrix, received_string_size , MPI_CHAR, sending_rank, MPI_DATA_TRANSFER, MPI_COMM_WORLD, &stat );
248 data.assign( matrix, received_string_size );
250 char *cbuf =
new char[tag_size+1];
251 MPI_Recv( cbuf, tag_size, MPI_CHAR, sending_rank, TAG_TRANSFER, MPI_COMM_WORLD, &stat );
252 tag.assign( cbuf, tag_size );
253 PROF_STOP( basic::MPI_SLAVE_REPORT_NEW_COORDS );
// --- string-to-FArray fragment: parse whitespace-separated doubles back
// into xyz(i, j). NOTE(review): the loop headers over i/j and the element
// declaration are outside this excerpt; extraction failures (stream in a
// fail state) are not checked here.
258 std::istringstream string_to_double;
259 string_to_double.str(
string);
260 PROF_START( basic::FARRAY_MANIPULATION );
264 string_to_double >> element;
265 xyz( i, j ) = element;
268 PROF_STOP( basic::FARRAY_MANIPULATION );
// --- get_pool_diff fragment, master side: receive the slave's pool size,
// compute how many poses the slave is missing, and send that count back
// (both legs on the MPI_REPORT_SIZE tag).
284 int target_rank_pool_size;
286 tr.Debug <<
"(get_pool_diff) current size is " << my_pool_size << std::endl;
290 PROF_START( basic::MPI_SYNC_POOL_DIFF );
291 MPI_Recv( &target_rank_pool_size, 1, MPI_INT, target_rank, MPI_REPORT_SIZE, MPI_COMM_WORLD, &status );
292 num_diff = my_pool_size - target_rank_pool_size;
293 MPI_Send( &num_diff, 1, MPI_INT, target_rank, MPI_REPORT_SIZE, MPI_COMM_WORLD );
294 tr.Debug <<
"MASTER NODE HAS " << my_pool_size <<
" AND SLAVE HAS " << target_rank_pool_size <<
" SO NEED TO UPDATE " << num_diff <<
" LAST POSES " << std::endl;
295 PROF_STOP( basic::MPI_SYNC_POOL_DIFF );
// Slave side of the same exchange: report own size, receive the diff.
297 PROF_START( basic::MPI_SYNC_POOL_DIFF );
298 MPI_Send( &my_pool_size, 1, MPI_INT,
master_node_, MPI_REPORT_SIZE, MPI_COMM_WORLD );
299 MPI_Recv( &num_diff, 1, MPI_INT,
master_node_, MPI_REPORT_SIZE, MPI_COMM_WORLD, &status );
300 PROF_STOP( basic::MPI_SYNC_POOL_DIFF );
// --- send_accepted fragment: ship the accept/reject decision as an int
// (bool_to_int, set outside this excerpt) on the MPI_BOOL tag.
311 tr.Debug <<
"(send_accepted)sending to rank " << rank <<
" " << truefalse << std::endl;
312 PROF_START( basic::MPI_SEND_ACCEPTED );
313 MPI_Send( &bool_to_int, 1, MPI_INT, rank, MPI_BOOL, MPI_COMM_WORLD );
314 PROF_STOP( basic::MPI_SEND_ACCEPTED );
// --- receive_accepted fragment: matching receive; 1 means accepted.
320 PROF_START( basic::MPI_SEND_ACCEPTED );
321 MPI_Recv( &bool_to_int, 1, MPI_INT, rank, MPI_BOOL, MPI_COMM_WORLD, &stat );
322 PROF_STOP( basic::MPI_SEND_ACCEPTED );
323 if( bool_to_int == 1){
// --- trajectory-bookkeeping fragments: progress / completion debug output.
// NOTE(review): the counters streamed after these inserters are outside this
// excerpt (embedded numbering skips 334-337 and 339-346).
333 tr.Debug <<
"num trajectories finished: " <<
338 tr.Debug <<
"FINISHED! num trajectories finished: " <<
347 tr.Debug <<
"sending finalized message to master" << std::endl;
// --- master_go fragment: the master's dispatch loop over received
// message_type values (UPDATE_MISSING_POSE / MPI_ADD_POSE_TO_POOL /
// MPI_FINISHED). Branch bodies are heavily elided here.
354 tr.Debug <<
"in master go master rank is " <<
rank_ << std::endl;
356 PROF_START( basic::MPICANONICALSAMPLING );
363 if( message_type == UPDATE_MISSING_POSE ){
367 else if(message_type == MPI_ADD_POSE_TO_POOL){
369 FArray2D<double> new_coords;
376 bool is_accepted =
false;
377 if( num_recent_updates == 0 ){
392 <<
" new_structures_ " << new_structures_
// Guard: never evaluate from a negative start index.
396 runtime_assert( new_structures_ >= num_recent_updates );
397 tr.Debug <<
"starting evaluation from index: " << (new_structures_ - num_recent_updates +1) <<
" of " << new_structures_ << std::endl;
// Evaluate the candidate only against poses added since this slave's last
// sync (start index new_structures_ - num_recent_updates + 1).
398 Pool_RMSD::evaluate( new_coords, best_decoy, best_rmsd, (new_structures_ - num_recent_updates +1) );
401 tr.Debug <<
"best_rmsd " << best_rmsd <<
419 }
418 else branch: a trajectory reported completion.
// --- is-master-node check fragment (debug output only in this excerpt).
429 tr.Debug <<
"testing if is master node: rank is " <<
rank_ <<
" and master node rank is " <<
master_node_ << std::endl;
// --- evaluate_and_add entry fragment: identify this rank and hand off to
// the pool evaluation (the call itself falls in the elided lines 447-454).
442 tr.Debug <<
"node is rank " <<
rank_ <<
" out of " <<
npes_ << std::endl;
443 tr.Debug <<
" using MPIPool_RMSD::evaluate_and_add" << std::endl;
446 PROF_START( basic::MPICANONICALSAMPLING );
455 PROF_STOP( basic::MPICANONICALSAMPLING );
458 tr.Debug <<
"best rmsd after evaluation is " << best_rmsd <<
" threadhol " << transition_threshold << std::endl;
// A pose farther than transition_threshold from everything in the pool is
// novel and gets added (body largely elided here).
459 if(best_rmsd > transition_threshold){
460 tr.Debug <<
"best_rmsd is " << best_rmsd <<
" which is greater than transition_threshold, adding pose to pool " << std::endl;
462 PROF_START( basic::MPICANONICALSAMPLING );
466 PROF_START( basic::FARRAY_MANIPULATION );
468 PROF_STOP( basic::FARRAY_MANIPULATION );
481 PROF_STOP( basic::MPICANONICALSAMPLING );
// Master accepted the pose: persist it to the silent file under its new
// cluster tag. NOTE(review): ss construction and the actual write fall in
// the elided lines; fill_struct copies the pose into the silent struct.
484 tr.Debug <<
"master accepted new pose(evaluate). adding new pose to pool, "
489 PROF_START( basic::WRITE_TO_FILE );
491 ss->fill_struct( pose, new_cluster_tag );
494 PROF_STOP( basic::WRITE_TO_FILE );