Rosetta 3.5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
MPIHPool_ConvergenceCheck.cc
Go to the documentation of this file.
1 #ifdef USEMPI
2 #include <mpi.h>
3 #endif
4 
11 
12 // AUTO-REMOVED #include <basic/options/option.hh>
13 // AUTO-REMOVED #include <basic/options/option_macros.hh>
14 
15 // MPI only headers (wrapping these headers in an ifdef block will prevent my #inclusion-removal script from removing them)
16 #ifdef USEMPI
21 
26 
27 #include <utility/exit.hh>
28 #include <utility/file/file_sys_util.hh>
29 #include <basic/prof.hh>
30 #include <basic/options/keys/cluster.OptionKeys.gen.hh>
31 #include <basic/options/keys/mc.OptionKeys.gen.hh>
32 #include <basic/options/keys/score.OptionKeys.gen.hh>
33 #endif
34 
36 #include <ObjexxFCL/FArray3D.hh>
37 #include <ObjexxFCL/FArray2D.hh>
38 #include <core/pose/Pose.hh>
39 #include <basic/Tracer.hh>
40 #include <core/types.hh>
41 
42 // AUTO-REMOVED #include <numeric/xyzVector.hh>
43 // AUTO-REMOVED #include <core/conformation/Residue.hh>
44 
45 // AUTO-REMOVED #include <ctime>
46 
47 #include <utility/vector1.hh>
48 
49 
50 static basic::Tracer tr("MPIHPool_ConvergenceCheck");
51 
52 namespace protocols{
53 namespace canonical_sampling{
54 namespace mc_convergence_checks{
55 
// Per-rank trajectory state flags, exchanged via MPI_Allgather in
// any_node_finished() to detect ranks that have run out of work.
int const FINISHED = 1;
int const IN_PROGRESS = 0;
// Rank responsible for collecting and writing newly discovered decoys
// (the default target of send_silent_struct_to_rank's 4-arg overload).
int const MPI_OUTPUT_RANK = 0;
// MPI message tag used for all decoy-output header/payload traffic.
int const OUTPUT_TAG = 1000;
60 
61 #ifdef USEMPI
62  MPI_Comm protocols::canonical_sampling::mc_convergence_checks::MPIHPool_RMSD::MPI_COMM_POOL;
63  using namespace basic;
64  //
65 
66  //specified silent-file assumed to contain top-level structures only
// Construct an MPI-aware hierarchical convergence-check pool.
// @param silent_file  silent file assumed to contain only top-level
//                     (coarsest) structures; used to seed the hierarchy.
// @param levels       number of levels in the clustering hierarchy.
// NOTE(review): npes_, rank_ and nresidues_ are initialized to -1 as
// "not yet set" sentinels; if these members are unsigned this relies on
// wrap-around -- confirm against the class declaration.
MPIHPool_RMSD::MPIHPool_RMSD( std::string const& silent_file, core::Size levels ):
	Pool_RMSD(),
	hlevel_( levels, silent_file ),
	pool_size_(0),
	num_structures_added_(0),
	npes_(-1),
	rank_(-1),
	new_decoys_out_("discovered_decoys.out"), // default; override via set_discovered_out()
	nresidues_(-1),
	nlevels_( levels ),
	first_time_writing_( true ), // headers get written on first decoy output
	level_radii_(),
	current_address_(),
	current_best_rmsds_(),
	best_address_(),
	current_address_str_("no_address"),
	buf_(),
	current_trajectory_state_(IN_PROGRESS)
{
	// Remaining setup (MPI communicator, radii, buffers) happens here.
	initialize();
}
88 
// Receive `num_structures_to_write` decoys (from any sending rank, in
// arrival order) and write each one into the hierarchy's output files.
// Counterpart of send_silent_struct_to_rank() on the sending side.
// NOTE(review): the extraction this view was taken from dropped lines
// here -- the declarations of `sfd` (a SilentFileData parameter) and
// `ss` (a SilentStructOP local) used below are not visible.
void
MPIHPool_RMSD::receive_and_output_structures(
	core::Size num_structures_to_write
) {
	while( num_structures_to_write > 0 ) {
		// Blocks until any rank sends a structure (MPI_ANY_SOURCE inside).
		// Fills buf_.winning_address_ / buf_.new_level_begins_ from the
		// sender's header message.
		receive_silent_struct_any_source( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ );
		if( first_time_writing_ ) {
			// Silent-file headers are written exactly once, before the first decoy.
			first_time_writing_ = false;
			write_headers_to_hierarchy( ss );
		}
		write_decoys_to_hierarchy( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ );
		num_structures_to_write--;
	}
}
105 
// Resolve the unassigned (zero) levels of a candidate address and build
// the decoy tag for it.
// @param new_addr          in/out: address with 0 at unresolved levels;
//                          each 0 is replaced by the next free index in
//                          the corresponding pool.
// @param new_level_start   out: first level at which a new branch was
//                          created (0 if the address was fully resolved).
// @param new_candidate_tag out: tag assigned via assign_tag().
void
MPIHPool_RMSD::resolve_address_and_assign_tag( Address& new_addr, core::Size& new_level_start, std::string& new_candidate_tag ) {
	new_level_start = 0;
	for( core::Size ii = 1; ii <= new_addr.size(); ii++ ) {
		if( new_addr[ ii ] == 0 ) {
			// Remember only the FIRST level where a branch is newly created.
			if( new_level_start == 0 ) new_level_start = ii;
			// Next free slot in the parent pool (one past its current size).
			core::Size next_free_index = hlevel_.pool_size( new_addr, ii - 1 ) + 1;
			runtime_assert( next_free_index != 0 );
			new_addr[ ii ] = next_free_index;
		}
	}
	// Would-be index of this structure in the deepest-level pool.
	core::Size id_num = hlevel_.pool_size( new_addr, hlevel_.nlevels() ) + 1;
	// Sanity check: if a new branch was created above the deepest level,
	// the deepest pool for that branch must still be empty (id_num == 1).
	if( new_level_start < new_addr.size() && new_level_start > 0 && id_num != 1 ) {
		if ( tr.Error.visible() ) {
			tr.Error << "new level start is " << new_level_start << " so new branches are created, but pool size is NOT zero! " << hlevel_.pool_size( new_addr, hlevel_.nlevels() ) << " for address: ";
			for( core::Size ii = 1; ii <= new_addr.size(); ii++ ) {
				tr.Error << new_addr[ ii ] << " ";
			}
			tr.Error << std::endl;
		}
	}
	if( tr.visible() ) tr.Debug << "about to check runtime-asserts: " << id_num << " " << hlevel_.first_zero_pos( new_addr ) << " " << hlevel_.nlevels() << std::endl;

	// Either a new branch was created (first structure in its pool), or the
	// address was already fully resolved (no zero positions remain).
	runtime_assert( ( id_num == 1 && hlevel_.first_zero_pos( new_addr ) <= hlevel_.nlevels() ) ||
		( id_num >= 1 && hlevel_.first_zero_pos( new_addr ) > hlevel_.nlevels() ) ); //if new branches are created, make sure structure is first in pool
	if( tracer_visible_ ) {
		tr.Debug << "pool size is " << id_num << " for addr: ";
		for( core::Size ii = 1; ii <= hlevel_.nlevels(); ii++ ) {
			tr.Debug << new_addr[ ii ] << " | ";
		}
		for( core::Size ii = 1; ii <= hlevel_.nlevels(); ii++ ) {
			tr.Debug << buf_.candidate_address_[ ii ] << " ";
		}
		tr.Debug << std::endl;
	}

	assign_tag( new_addr, id_num, new_candidate_tag );
}
144 
145 
// Evaluate `pose` against the hierarchical pool, exchange newly discovered
// structures with all ranks in MPI_COMM_POOL, add genuinely new structures
// to every rank's copy of the hierarchy, and arrange for winning ranks'
// decoys to be written out.
// @param pose       structure produced by this rank's trajectory.
// @param best_decoy out: tag of the closest existing pool member.
// @param best_rmsd  out: rmsd to that member at the deepest level.
// @return index of the best-matching structure.
// NOTE(review): the return type is core::Size (unsigned) but the function
// returns -1 both as the "all trajectories finished" signal and as the
// initial best_index -- this wraps to the max value; confirm callers
// compare against core::Size(-1).
core::Size
MPIHPool_RMSD::evaluate_and_add(
	core::pose::Pose const& pose,
	std::string& best_decoy,
	core::Real& best_rmsd ) {

	using namespace basic;
	// All ranks enter this routine together: it is a sequence of MPI
	// collectives (Barrier/Allgather/Allgatherv/Reduce) that must match.
	PROF_START( basic::MPIBARRIER_BEGIN );
	MPI_Barrier( MPI_COMM_POOL );
	PROF_STOP( basic::MPIBARRIER_BEGIN );

	PROF_START( basic::MPIH_EVAL_CHECK_PROGRESS );
	if( tracer_visible_ ) {
		tr.Debug << "now in evaluate and add, number of structures in top-level pool: " << hlevel_.top_level_pool_size() << " pool-rank: " << pool_rank_ << " pool-size: " << pool_npes_ << std::endl;
	}

	// (Re)allocate communication buffers if the pose size changed.
	if( nresidues_ != pose.total_residue() ) {
		//~buf_(); //call destructor?
		//if sizes are in-consistent, setup new arrays
		nresidues_ = pose.total_residue();
		runtime_assert( pool_npes_ > 0 );
		buf_.setup( pool_npes_, nresidues_, nlevels_ );
	}

	//check progress. have any nodes finished?

	core::Size num_nodes_finished = any_node_finished();
	if( num_nodes_finished > 0 ) {
		if( pool_npes_ - num_nodes_finished == 0 ) {
			if( tracer_visible_ ) {
				tr.Info << "no more nodes running trajectories, returning " << std::endl;
				tr.Info << "finishing trajectory" << std::endl;
			}
			return -1; //no more nstruct, so simply return
		}
		// Shrink the communicator to the still-running ranks.
		update_comm( pool_npes_ - num_nodes_finished );
		if( current_trajectory_state_ == IN_PROGRESS ) {
			MPI_Comm_rank( MPI_COMM_POOL, ( int* )( &pool_rank_ ) );
			MPI_Comm_size( MPI_COMM_POOL, ( int* )( &pool_npes_ ) );
		}
	}

	PROF_STOP( basic::MPIH_EVAL_CHECK_PROGRESS );
	core::Size best_index = -1; // sentinel; see NOTE(review) above
	if( current_trajectory_state_ == IN_PROGRESS ) {
		//evaluate the structure against the local copy of the hierarchy
		best_index = hlevel_.evaluate( pose, best_decoy, current_best_rmsds_, best_address_ );
		if( tracer_visible_ ) {
			tr.Debug << "finished evaluating, best decoy has the tag: " << best_decoy << "\n";
			for( core::Size ii = 1; ii <= current_best_rmsds_.size() ; ii++ ) {
				tr.Debug << "level=" << ii << " best-rmsd=" << current_best_rmsds_[ ii ] << " level-radii=" << level_radii_[ ii ] << " best-level-address=" << best_address_[ ii ] << std::endl;
			}
			tr.Debug << "done dumping out information about best-rmsd" << std::endl;
		}

		hlevel_.debug_print_size_per_level();
		PROF_START( basic::MPIH_EVAL_COMMUNICATE_NEW );
		//store the highest-resolution rmsd
		core::Size sendcounts = 0;
		//send coordinates and information about evaluation to other nodes
		protocols::toolbox::fill_CA_coords( pose, buf_.coords_ );
		if ( is_new_structure( best_address_, level_radii_, current_best_rmsds_ ) ) {
			if( tr.visible() ) tr.Debug << "i've found a new structure!" << std::endl;
			// NOTE(review): the next assert is duplicated verbatim.
			runtime_assert(buf_.coords_.u1() > 0 && buf_.coords_.u2() > 0);
			runtime_assert(buf_.coords_.u1() > 0 && buf_.coords_.u2() > 0);
			prepare_send_new_coords( true ); //uses buf_.int_buf1_
			sendcounts = ( 3 * nresidues_ ); // x,y,z per residue
		} else {
			// Publishes an all -1 address: "nothing new from this rank".
			prepare_send_new_coords( false ); //uses buf_.int_buf1_
		}
		// Every rank learns every other rank's candidate address.
		MPI_Allgather( buf_.int_buf1_, nlevels_, MPI_INT, buf_.neighbor_addresses_, nlevels_, MPI_INT, MPI_COMM_POOL );

		best_rmsd = current_best_rmsds_[ current_best_rmsds_.size() ]; //this is what is written out to traj files
		double candidate_best_rms = best_rmsd;
		//remove this, not needed?
		MPI_Allgather( &candidate_best_rms, 1, MPI_DOUBLE, buf_.candidate_best_rmsds_, 1, MPI_DOUBLE, MPI_COMM_POOL );
		if( tracer_visible_ ) {
			tr.Debug << "address of all-nbrs: " << (nlevels_ * pool_npes_ )
			<< " nlevel: " << nlevels_ << " npes " << pool_npes_ << std::endl;
			for( core::Size ii = 0; ii < ( nlevels_ * pool_npes_ ); ii++ ) {
				tr.Debug << buf_.neighbor_addresses_[ ii ] << " ";
			}
			tr.Debug << std::endl;
		}
		if( tracer_visible_ ) tr.Debug << "scan output and setup to receive" << std::endl;
		// Fills buf_.is_a_neighbor_ / num_new_neighbors_ and the
		// counts/displacements arrays for the Allgatherv below.
		scan_output_and_setup_to_receive(); //scans neighbor_addresses_ and determines nbrs
		if( tracer_visible_ ) tr.Debug << "done calling scan output and setup to receive" << std::endl;
		MPI_Allgatherv( buf_.coords_transfer_buffer_,
			sendcounts,
			MPI_DOUBLE,
			buf_.coords_receiving_buffer_,
			buf_.int_buf1_, //counts
			buf_.memory_offset_,//displacements
			MPI_DOUBLE,
			MPI_COMM_POOL
		); //have to receive all structures

		PROF_STOP( basic::MPIH_EVAL_COMMUNICATE_NEW );

		bool i_am_a_winning_rank = false;
		buf_.candidate_nbr_index_ = 0; // restart candidate iteration
		utility::vector1< Address > prev_added_addresses;
		utility::vector1< core::Size > prev_added_start_indices;
		Address new_addr;
		num_structures_added_ = 0;

		if( buf_.num_new_neighbors_ > 0 ) {
			std::string new_candidate_tag;
			PROF_START( basic::MPIH_ADD_FIRST_STRUCTURE );
			bool has_new_structure = get_next_candidate(); //find next neighbor and copies into appropriate buffs
			runtime_assert( has_new_structure ); //first structure in list is automatically added
			++num_structures_added_;
			new_addr = buf_.candidate_address_;
			core::Size new_level_start = 0;
			// Keep the pre-resolution address: used as the dedup key below.
			Address unresolved = buf_.candidate_address_;
			resolve_address_and_assign_tag( new_addr, new_level_start, new_candidate_tag ); //added 7/28/10

			// save this information for sending to output-node later
			if( is_my_structure() ) {
				i_am_a_winning_rank = true;
				buf_.winning_address_ = new_addr;
				buf_.winning_tag_ = new_candidate_tag;
				buf_.new_level_begins_ = new_level_start;
			}

			hlevel_.add_new( buf_.candidate_coords_, new_candidate_tag, new_addr );
			if( tracer_visible_ ) {
				tr.Debug << "adding this structure to hierarchy: ";
				for( core::Size ii = 1; ii <= new_addr.size(); ii++ ) {
					tr.Debug << new_addr[ ii ] << " ";
				}
				tr.Debug << " " << new_candidate_tag << std::endl;
			}

			prev_added_addresses.push_back( unresolved );
			//keep track of what addresses have been added so far for evaluation of new structures
			//prev_added_addresses.push_back( new_addr );
			std::list<PoolData>::iterator itr;
			hlevel_.level_find( new_addr, hlevel_.nlevels(), itr );
			prev_added_start_indices.push_back( (*itr).pool_->size() );

			PROF_STOP( basic::MPIH_ADD_FIRST_STRUCTURE );
			//after processing the first structure, go through the rest and evaluate against newly discovered
			while( get_next_candidate() ) {
				PROF_START( basic::MPIH_EVAL_AGAINST_NBR );
				//setup the universal address for evaluation
				Address best_addr( hlevel_.nlevels(), 0);
				best_addr[ 1 ] = 1;
				//
				//tr.Debug << "about to evaluate against previously added structures" << std::endl;
				// NOTE(review): this local shadows the member function of the
				// same name used earlier in this function.
				bool is_new_structure = true;
				for( core::Size ii = 1; ii <= prev_added_addresses.size(); ii++ ) {
					utility::vector1< core::Real > candidate_rmsd( hlevel_.nlevels(), 0.0 );
					// Only compare against previously added structures that share
					// the same unresolved address (same prospective branch).
					bool equal_addresses = true;
					for( core::Size jj = 1; jj <= buf_.candidate_address_.size(); jj++ ) {
						if( buf_.candidate_address_[ jj ] != (prev_added_addresses[ ii ])[ jj ] ) {
							equal_addresses = false;
							break;
						}
					}
					if( equal_addresses ) {
						Address test_addr = prev_added_addresses[ ii ];
						hlevel_.evaluate( buf_.candidate_coords_,
							new_candidate_tag,
							candidate_rmsd,
							test_addr,
							false,
							false );
						core::Size last_pos_nonzero = hlevel_.first_zero_pos( test_addr ) - 1;
						// Within the radius of an already-added structure at the
						// deepest resolved level => redundant, skip it.
						if( candidate_rmsd[ last_pos_nonzero ] < level_radii_[ last_pos_nonzero ] ) {
							//tr.Debug << "lowest resolved level: " << last_pos_nonzero << " for evaluated address: ";
							if( tr.visible() ) {
								for( core::Size ii = 1; ii <= candidate_rmsd.size(); ii++ ) {
									tr.Debug << "level-addr=" << test_addr[ ii ] << " rms=" << candidate_rmsd[ ii ] << " radius=" << level_radii_[ ii ] << std::endl;
								}
								tr.Debug << "hence this structure is deemed a redundant structure, rejecting!" << std::endl;
							}
							is_new_structure = false;
							break;
						}
					} //if( equal_addresses )
				}
				//tr.Debug << "done evaluating against previously added structures, tag: " << new_candidate_tag << " and addr: "; print_address( best_addr );

				if( is_new_structure ) {
					++num_structures_added_;
					utility::vector1< core::Real > best_test_rmsd( hlevel_.nlevels(), 0.0 );
					Address new_addr = buf_.candidate_address_; // shadows outer new_addr
					Address unresolved = new_addr;
					hlevel_.evaluate( buf_.candidate_coords_,
						new_candidate_tag,
						best_test_rmsd,
						new_addr,
						false,
						false ); //attempt to resolve any remaining levels which may be sub-clusters of structures which were just added
					core::Size new_level_start = 0;
					if( hlevel_.address_exists_in_cache( new_addr ) ) {
						new_candidate_tag = "";
						resolve_address_and_assign_tag( new_addr, new_level_start, new_candidate_tag );
						hlevel_.add_new( buf_.candidate_coords_, new_candidate_tag, new_addr );

						if( is_my_structure() ) {
							i_am_a_winning_rank = true;
							buf_.winning_address_ = new_addr;
							buf_.winning_tag_ = new_candidate_tag;
							buf_.new_level_begins_ = new_level_start;
						}
						if( tracer_visible_ ) {
							tr.Debug << "adding structure to hierarchy ";
							for( core::Size ii = 1; ii <= new_addr.size(); ii++ ) {
								tr.Debug << new_addr[ ii ] << " ";
							}
							tr.Debug << " " << new_candidate_tag << std::endl;
						}

						core::Size index = find_address( new_addr, prev_added_addresses );
						if( index > prev_added_addresses.size() ) { //not seen before
							//save the address if it hasn't been seen before
							prev_added_addresses.push_back( unresolved );
							std::list<PoolData>::iterator itr;
							hlevel_.level_find( new_addr, hlevel_.nlevels(), itr );
							prev_added_start_indices.push_back( (*itr).pool_->size() );
						}
					}
				}
				PROF_STOP( basic::MPIH_EVAL_AGAINST_NBR );
			}
			send_receive_and_write_structures( i_am_a_winning_rank, pose );
			if ( num_structures_added_ > 0 ) {
				//if new structures have been added after the first eval, update the potentially out-of-date assignment
				PROF_START( basic::MPIH_UPDATE_EVAL );
				//tr.Debug << "now doing second evaluation update" << std::endl;
				//update previous evaluation, if need be.
				std::string second_update_best_decoy;
				utility::vector1< core::Real > second_update_rmsd( hlevel_.nlevels(), 0.0 );
				for( core::Size ii =1 ; ii <= prev_added_addresses.size(); ii++ ) {
					Address test_addr = prev_added_addresses[ ii ];
					core::Size second_update_index = hlevel_.evaluate( buf_.coords_,//update original assignment
						second_update_best_decoy,
						second_update_rmsd,
						test_addr,
						false,
						false );
					if( hlevel_.first_zero_pos( test_addr ) == hlevel_.nlevels() + 1 && //only replace if fully resolved
						second_update_rmsd[ second_update_rmsd.size() ] < best_rmsd &&
						second_update_best_decoy.compare("") != 0 ) {
						best_rmsd = second_update_rmsd[ second_update_rmsd.size() ];
						best_decoy = second_update_best_decoy;
						best_index = second_update_index;
					}
				}
			}
			// NOTE(review): this PROF_STOP runs even when the matching
			// PROF_START above was skipped (num_structures_added_ == 0 after
			// the first add is impossible here, but the pairing is outside
			// the if) -- confirm PROF_STOP without PROF_START is benign.
			PROF_STOP( basic::MPIH_UPDATE_EVAL );
		} else {
			// No new neighbors anywhere: still participate in the collective
			// write phase so the MPI_Reduce inside matches on all ranks.
			send_receive_and_write_structures( false, pose );
			/**
			//if there are no new structures in my neighborhood, then report that I have no new structures
			PROF_START( basic::MPIH_PREPARE_WRITE_STRUCTURES );
			int num_to_print = 0;
			int num_structures_to_write;
			MPI_Reduce( &num_to_print, &num_structures_to_write, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_POOL );
			PROF_STOP( basic::MPIH_PREPARE_WRITE_STRUCTURES );
			if( pool_rank_ == MPI_OUTPUT_RANK ) {
			PROF_START( basic::MPIH_WRITE_STRUCT );
			core::io::silent::SilentFileData sfd;
			sfd.strict_column_mode( true );
			receive_and_output_structures( sfd, num_structures_to_write );
			PROF_STOP( basic::MPIH_WRITE_STRUCT );
			}
			**/
		}
	}
	hlevel_.debug_print_size_per_level();
	PROF_START( basic::MPIBARRIER_END );
	MPI_Barrier( MPI_COMM_POOL );
	PROF_STOP( basic::MPIBARRIER_END );
	return best_index;
}
// Coordinate the writing of newly discovered decoys: winning ranks
// serialize their structure and send it to a designated writer rank,
// which writes everything to the hierarchy's output files.
// Two strategies exist; use_batch_write_out is hard-coded false, so the
// batch branch is currently dead code kept for reference.
// NOTE(review): the doxygen extraction dropped lines in this function --
// the declarations of `sfd` (SilentFileData) and `ss` (SilentStructOP)
// used before each ss->fill_struct(...) call are not visible here.
void
MPIHPool_RMSD::send_receive_and_write_structures(bool i_am_a_winning_rank, core::pose::Pose const& pose) {

	//look in buf_.neighbor_addresses_ for addresses that match my query
	bool use_batch_write_out = false; // dead branch below; kept as-is
	if( use_batch_write_out ) {
		if( tr.visible() ) tr.Debug << "attempting to use batch write-out to dump decoys" << std::endl;
		int print = 0;
		if( i_am_a_winning_rank ) print = 1;
		// NOTE(review): allocated with new[] and never delete[]d on any
		// path through this branch -- memory leak (dead code today, but a
		// leak if the branch is ever enabled).
		int* have_structure_to_print = new int[ pool_npes_ ];
		MPI_Allgather( &print, 1, MPI_INT, have_structure_to_print, 1, MPI_INT, MPI_COMM_POOL );
		if( i_am_a_winning_rank ) {
			core::Size num_nbrs = 0;
			core::Size min_ranking_proc = pool_npes_;

			for( core::Size ii = 0; ii < pool_npes_; ii++ ) { //determine which group you belong to
				if( tr.visible() ) tr.Debug << "looking at position " << ii << " of " << pool_npes_ << std::endl;
				if( have_structure_to_print[ ii ] == 1 ) {
					bool same_unresolved_addr = true;
					for( core::Size jj = 0; jj < hlevel_.nlevels(); jj++ ) {
						if( best_address_[ jj + 1 ] != buf_.neighbor_addresses_[ (ii * hlevel_.nlevels()) + jj ] ) same_unresolved_addr = false;
					}
					if( same_unresolved_addr ) { //only winning ranks with same unresolved addresses care
						//DEBUG OUTPUT
						if( tr.visible() ) {
							tr.Debug << "my current query address: ";
							for( core::Size kk = 1; kk <= best_address_.size(); kk++ ) {
								tr.Debug << best_address_[ kk ] << " ";
							}
							tr.Debug << " compared to nbr rank: " << ii << " ";
							for( core::Size kk = 0; kk < hlevel_.nlevels(); kk++ ) {
								tr.Debug << buf_.neighbor_addresses_[ (ii * hlevel_.nlevels()) + kk ] << " ";
							}
							tr.Debug << "are the same and will be grouped together for writing to file" << std::endl;
						}
						//END DEBUG OUTPUT
						// Lowest rank in the group becomes the group's writer.
						if( ii < min_ranking_proc ) min_ranking_proc = ii;
						num_nbrs++;
					} else {
						if( tr.visible() ) tr.Debug << "no matching addresses, will not group any processes together" << std::endl;
						have_structure_to_print[ ii ] = 0;
					}
				}
			}
			//how many levels to print?
			if( pool_rank_ == min_ranking_proc ) {
				if( tr.visible() ) tr.Debug << "I am the min ranking proc: " << pool_rank_ << " and i am printing out these rank neighbors: ";
				// NOTE(review): loop bound is num_nbrs (count of group members)
				// but indexes per-rank slots -- looks like it should run to
				// pool_npes_; confirm intent (dead code today).
				for( core::Size ii = 0; ii < num_nbrs; ii++ ) {
					if( have_structure_to_print[ ii ] == 1 ) {
						if( tr.visible() ) {
							tr.Debug << " nbr rank: " << ii << " ";
							for( core::Size jj = 0; jj < hlevel_.nlevels(); jj++ ) {
								tr.Debug << buf_.neighbor_addresses_[ ( ii * hlevel_.nlevels() ) + jj ] << " ";
							}
						}
					}
				}
				if( tr.visible() ) tr.Debug << std::endl;
				// NOTE(review): extraction gap here -- sfd/ss declarations missing.
				ss->fill_struct( pose, buf_.winning_tag_ );
				if( first_time_writing_ ) {
					first_time_writing_ = false;
					write_headers_to_hierarchy( ss );
				}
				write_decoys_to_hierarchy( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ );
				num_nbrs--; // my own structure is already written
				if( num_nbrs > 0 ) {
					receive_and_output_structures( sfd, num_nbrs );
				}
			} else if( pool_rank_ != min_ranking_proc ) {
				if( tr.visible() ) tr.Debug << "sending my structure to min-ranking-proc: " << min_ranking_proc << std::endl;
				// NOTE(review): extraction gap here -- sfd/ss declarations missing.
				ss->fill_struct( pose, buf_.winning_tag_ ); //ek debug 7/30/10 uncomment when everything is fixed
				send_silent_struct_to_rank( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ , min_ranking_proc );
			}
		}
	} else {

		PROF_START( basic::MPIH_PREPARE_WRITE_STRUCTURES );
		int num_structures_to_write = 0;
		//report whether or not i have a structure to write out.
		// All ranks participate in the reduce; rank 0 learns the total.
		if( i_am_a_winning_rank ) {
			int num_to_print = 1;
			MPI_Reduce( &num_to_print, &num_structures_to_write, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_POOL );
		} else {
			int num_to_print = 0;
			MPI_Reduce( &num_to_print, &num_structures_to_write, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_POOL );
		}
		PROF_STOP( basic::MPIH_PREPARE_WRITE_STRUCTURES );
		if( pool_rank_ == MPI_OUTPUT_RANK ) {
			//if I am the output rank, receive and write out structures
			if( tracer_visible_ ) { tr.Debug << "expecting to write out " << num_structures_to_write << " structures" << std::endl; }
			// NOTE(review): extraction gap -- declaration of sfd missing here.
			sfd.strict_column_mode( true );
			PROF_START( basic::MPIH_WRITE_STRUCT );

			if( i_am_a_winning_rank ) {
				//write out my own structure first
				// NOTE(review): extraction gap -- declaration of ss missing here.
				ss->fill_struct( pose, buf_.winning_tag_ );
				if( first_time_writing_ ) {
					first_time_writing_ = false;
					write_headers_to_hierarchy( ss );
				}
				write_decoys_to_hierarchy( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ );
				num_structures_to_write--;
			}
			receive_and_output_structures( sfd, num_structures_to_write );
			PROF_STOP( basic::MPIH_WRITE_STRUCT );
		} else {
			//if I am not the output rank, then send my structures to the output rank
			PROF_START( basic::MPIH_WRITE_STRUCT );
			if( i_am_a_winning_rank ) {
				// NOTE(review): extraction gap -- sfd/ss declarations missing here.
				ss->fill_struct( pose, buf_.winning_tag_ ); //ek debug 7/30/10 uncomment when everything is fixed
				send_silent_struct_to_rank( sfd, ss, buf_.winning_address_, buf_.new_level_begins_ );
			}
			PROF_STOP( basic::MPIH_WRITE_STRUCT );
		}
	}
}
549 
550  void
551  MPIHPool_RMSD::buf_to_address( Address & addr, int* addr_buf, core::Size index ) {
552  for( core::Size ii = 1; ii <= addr.size(); ii++ ) {
553  addr[ ii ] = addr_buf[ index + ii - 1 ];
554  }
555  }
556 
557  void
558  MPIHPool_RMSD::address_to_buf( Address & addr, int* addr_buf, core::Size index ) {
559  for( core::Size ii = 1; ii <= addr.size(); ii++ ) {
560  addr_buf[ index + ii - 1 ] = addr[ ii ];
561  }
562  }
563 
564  bool
565  MPIHPool_RMSD::is_my_structure() {
566  return ( ( buf_.candidate_nbr_index_ - 1 ) == pool_rank_);
567  }
568 
569 
// Decide whether an evaluated structure is new to the pool:
//  * any unresolved (zero) address level means a new branch => new, or
//  * the deepest-level rmsd exceeds the deepest level's radius => new.
// NOTE(review): the doxygen extraction dropped the remaining parameter
// lines of this signature -- callers pass (best_address_, level_radii_,
// current_best_rmsds_), so the missing params are presumably the radii
// and rmsds vectors used below; confirm against the header.
bool
MPIHPool_RMSD::is_new_structure( Address & address,
	for( core::Size ii = 1; ii <= address.size(); ii++ ) {
		if( address[ ii ] == 0 ) {
			if( tr.visible() ) tr.Debug << "address at level " << ii << " is 0, so is a new structure " << std::endl;
			return true;
		}
	}
	// Fully resolved address: compare the deepest-level rmsd to the
	// deepest level's clustering radius.
	if( rmsds[ rmsds.size() ] > hlevel_.level_radius( hlevel_.nlevels() ) ) {
		if( tr.visible() ) tr.Debug << "rms at last-level is: " << rmsds[ rmsds.size() ] << " which is greater than radius, " << hlevel_.level_radius( hlevel_.nlevels() ) << " so is a new structure" << std::endl;
		return true;
	}
	if( tr.visible() ) tr.Debug << " structure is not a new structure" << std::endl;
	return false;
}
587 
588  core::Real
589  MPIHPool_RMSD::resolved_level_best_rmsd( Address & addr, utility::vector1< core::Real > & rmsd ) {
590  core::Real best_rms = 0;
591  for( core::Size ii = 1; ii <= addr.size(); ii++ ) {
592  if( addr[ ii ] != 0 ) {
593  best_rms = rmsd[ ii ];
594  }
595  }
596  return best_rms;
597  }
598 
// Scalar-rmsd overload of the novelty test: new if any address level is
// unresolved (zero), or if the single rmsd exceeds the deepest radius.
// NOTE(review): the doxygen extraction dropped one parameter line of
// this signature -- the `radii` vector indexed below; confirm against
// the header.
bool
MPIHPool_RMSD::is_new_structure( Address & address,
	core::Real & rmsd ) {
	for( core::Size ii = 1; ii <= address.size(); ii++ ) {
		if( address[ ii ] == 0 ){
			//tr.Debug << "address at level " << ii << " is 0, so is a new structure " << std::endl;
			return true;
		}
	}
	// Fully resolved: compare against the deepest level's radius.
	if( rmsd > radii[ radii.size() ] ) {
		//tr.Debug << "rmsd at last level: " << rmsds[ rmsds.size() ] << " is greater than threshold: " << radii[ radii.size() ] << " so is a new structure " << std::endl;
		return true;
	}
	//tr.Debug << "structure is not a new structure" << std::endl;
	return false;
}
616 
617  void
618  MPIHPool_RMSD::print_address( Address & addr ) {
619  if( tr.visible() ) {
620  for( core::Size ii = 1; ii <= addr.size(); ii++ ) {
621  tr.Debug << addr[ ii ] << " ";
622  }
623  tr.Debug << std::endl;
624  }
625  }
626 
627  core::Size
628  MPIHPool_RMSD::find_address( Address & query, utility::vector1< Address > & addr_database ) {
629  PROF_START( basic::HIERARCHY_FIND_ADDRESS );
630  core::Size index;
631  for( index = 1; index <= addr_database.size(); index++ ) {
632  bool found_a_match = true;
633  for( core::Size jj = 1; jj <= query.size(); jj++ ) {
634  if( query[ jj ] != (( Address )addr_database[ index ])[ jj ] ) {
635  found_a_match = false;
636  }
637  }
638  if( found_a_match ) {
639  break;
640  }
641  }
642  PROF_STOP( basic::HIERARCHY_FIND_ADDRESS );
643  return index;
644  }
645 
// Collective: exchange every rank's trajectory state and count how many
// ranks have reported FINISHED this round. As a side effect, compacts
// the ranks still IN_PROGRESS into the front of buf_.int_buf1_, which
// update_comm() later feeds to create_comm() to shrink the communicator.
core::Size
MPIHPool_RMSD::any_node_finished(){
	// Publish my own state in slot 0 before the allgather.
	buf_.int_buf1_[ 0 ] = current_trajectory_state_;
	MPI_Allgather( buf_.int_buf1_, 1, MPI_INT, buf_.finished_, 1, MPI_INT, MPI_COMM_POOL );
	core::Size num_nodes_finished = 0;
	core::Size index_in_prog = 0;
	for( unsigned int ii = 0; ii < pool_npes_; ii++ ) {
		if( buf_.finished_[ ii ] == FINISHED ) {
			num_nodes_finished++;
		}else {
			// Reuse int_buf1_ as the compacted list of still-running ranks.
			buf_.int_buf1_[ index_in_prog++ ] = ii;
		}
	}
	if( tr.visible() ) {
		tr.Debug << "number of nodes finished this round: " << num_nodes_finished << std::endl;
	}
	return num_nodes_finished;
}
664 
// Rebuild MPI_COMM_POOL to contain only the `newsize` still-running
// ranks (their ids were compacted into buf_.int_buf1_ by
// any_node_finished()), resize the communication buffers accordingly,
// and refresh this rank's pool rank/size if it is still in progress.
void
MPIHPool_RMSD::update_comm( core::Size newsize ) {
	if( tr.visible() ) {
		tr.Debug << "some trajectories finished, creating new comm with " << newsize << std::endl;
	}
	// buf_.int_buf1_ holds the ranks to keep (filled by any_node_finished()).
	create_comm( buf_.int_buf1_, newsize );
	//~buf_();
	// Re-dimension all per-rank buffers for the smaller communicator.
	buf_.setup( newsize, nresidues_, nlevels_ );
	if( current_trajectory_state_ == IN_PROGRESS ) {
		MPI_Comm_rank( MPI_COMM_POOL, ( int* )( &pool_rank_ ) );
		MPI_Comm_size( MPI_COMM_POOL, ( int* )( &pool_npes_ ) );
		tr.Info << "remaining ranks has pool-size of " << pool_npes_ << " and rank: " << pool_rank_ << std::endl;
	}
}
679 
680 
681  void
682  MPIHPool_RMSD::prepare_send_new_coords( bool send_coords ){
683  if( send_coords ) {
684  runtime_assert( nlevels_ == best_address_.size() );
685  for( core::Size ii = 1; ii <= best_address_.size(); ii++ ) {
686  //tr.Debug << "writing to " << (ii-1) << " of " << best_address_.size() - 1 << std::endl;
687  buf_.int_buf1_[ ii - 1 ] = best_address_[ ii ];
688  }
689  buf_.farray_to_array( 0, buf_.coords_, buf_.coords_transfer_buffer_ );
690  //only one structure at a time, index is always 0
691  } else {
692  for( core::Size ii = 0; ii < nlevels_; ii++ ){
693  buf_.int_buf1_[ ii ] = -1;
694  }
695  }
696  }
697 
698 
// Advance buf_.candidate_nbr_index_ to the next rank flagged in
// buf_.is_a_neighbor_ and stage that rank's data (coords, address,
// best rmsd) into the candidate buffers.
// @return true if a next candidate was staged, false when exhausted.
bool
MPIHPool_RMSD::get_next_candidate() {
	PROF_START( basic::HIERARCHY_GET_NEXT_CANDIDATE );
	//buf_.candidate_nbr_index_ starts from 1, equivalent to pool_rank_ + 1
	if( buf_.candidate_nbr_index_ < (buf_.is_a_neighbor_).size() ) { //not at the end
		core::Size itr;
		for( itr = buf_.candidate_nbr_index_ + 1; itr <= (buf_.is_a_neighbor_).size(); itr++ ) {
			if( buf_.is_a_neighbor_[ itr ] ) {
				// Copy the sender's coords out of the gathered receive buffer,
				// using the displacement computed in scan_output_and_setup_to_receive().
				buf_.array_to_farray( buf_.memory_offset_[ itr - 1 ], buf_.candidate_coords_, buf_.coords_receiving_buffer_ );
				for( core::Size ii = 1; ii <= nlevels_; ii++ ) {
					buf_.candidate_address_[ ii ] = buf_.neighbor_addresses_[ ( ( itr - 1 ) * nlevels_ ) + ( ii - 1 ) ];
				}
				buf_.candidate_best_rmsd_ = buf_.candidate_best_rmsds_[ itr - 1 ];
				buf_.candidate_nbr_index_ = itr;
				if( tr.visible() ) {
					tr.Debug << "next examining address: ";
					for( core::Size ii = 1; ii <= buf_.candidate_address_.size(); ii++ ) {
						tr.Debug << buf_.candidate_address_[ ii ] << " ";
					}
					tr.Debug << std::endl;
				}
				break;
			}
		}
		PROF_STOP( basic::HIERARCHY_GET_NEXT_CANDIDATE );
		if( itr > (buf_.is_a_neighbor_).size()) { return false; } // no neighbors
		return true;
	} else {
		// NOTE(review): this early-out returns without the matching
		// PROF_STOP -- confirm unbalanced PROF_START/STOP is benign.
		return false;
	}
}
730 
// Receive one serialized decoy from any rank. Protocol (matching
// send_silent_struct_to_rank): first an int header
// [ string_size, address levels..., new_level ], then the silent-file
// text as raw chars from the same sender.
// @param recv_ss          cleared and refilled from the received text.
// @param ss               out: the received SilentStruct.
// @param ss_addr          out: the sender's winning address.
// @param new_level_begins out: first newly created hierarchy level.
void
MPIHPool_RMSD::receive_silent_struct_any_source( core::io::silent::SilentFileData& recv_ss, core::io::silent::SilentStructOP & ss, Address& ss_addr, core::Size& new_level_begins ) {
	using namespace core::io::silent;
	using namespace basic;
	PROF_START( basic::HIERARCHY_RECV_COORDS );
	recv_ss.clear();
	std::istringstream os;
	int string_size = 0;
	MPI_Status stat;
	std::string received_string;
	// NOTE(review): output_info is new[]-allocated but never delete[]d in
	// this function -- leaks one header buffer per received structure.
	int* output_info = new int[ 2 + ss_addr.size() ];
	//maybe receive address string_size

	//tr.Debug << "receiving this many ints: " << (2 + ss_addr.size() ) << std::endl;
	MPI_Recv( output_info, ( 2 + ss_addr.size() ), MPI_INT, MPI_ANY_SOURCE, OUTPUT_TAG, MPI_COMM_POOL, &stat );
	string_size = output_info[ 0 ];
	char *cbuf = new char[ string_size + 1 ];
	// The payload must come from the SAME rank that sent the header.
	int sending_rank = stat.MPI_SOURCE;
	for( core::Size ii =1; ii <= ss_addr.size(); ii++ ) {
		ss_addr[ ii ] = output_info[ ii ];
	}
	new_level_begins = output_info[ ss_addr.size() + 1 ];
	//tr.Debug << "receiving this many chars: " << string_size << std::endl;
	MPI_Recv( cbuf, string_size, MPI_CHAR, sending_rank, OUTPUT_TAG, MPI_COMM_POOL, &stat );
	received_string.assign( cbuf, string_size );
	os.str( received_string );
	// NOTE(review): the extraction dropped a line here -- the declaration
	// of `tags` passed to read_stream below is not visible in this view.
	recv_ss.read_stream( os, tags, false );
	ss = *(recv_ss.begin());
	if( tr.visible() ) {
		tr.Debug << "just received structure from rank: " << sending_rank
		<< " with tag " << ss->decoy_tag() << " new_level begins at: " << new_level_begins << std::endl;
	}
	delete[] cbuf;
	PROF_STOP( basic::HIERARCHY_RECV_COORDS );
}
767 
768  void
769  MPIHPool_RMSD::send_silent_struct_to_rank( core::io::silent::SilentFileData& send_ss, core::io::silent::SilentStructOP & ss, Address& ss_addr, core::Size& new_level, core::Size receiving_rank ) {
770  using namespace core::io::silent;
771  using namespace basic;
772  PROF_START( basic::HIERARCHY_SEND_COORDS );
773  send_ss.clear();
774  std::ostringstream os;
775  send_ss._write_silent_struct( *ss, os );
776  int* output_info = new int[ 2 + ss_addr.size() ];
777  std::string decoy_tag = ss->decoy_tag();
778  int string_size = (os.str()).size();
779  output_info[ 0 ] = string_size;
780  for( core::Size ii = 1; ii <= ss_addr.size(); ii++ ) {
781  output_info[ ii ] = ss_addr[ ii ];
782  }
783  //runtime_assert( new_level > 0 );
784  output_info[ ss_addr.size() + 1 ] = new_level;
785  if ( tr.visible() ) {
786  tr.Debug << "sending decoy with tag: " << ss->decoy_tag() << " new-level is " << new_level << std::endl;
787  }
788  MPI_Send(output_info, ( 2 + ss_addr.size() ), MPI_INT, receiving_rank, OUTPUT_TAG, MPI_COMM_POOL );
789  //tr.Debug << "sending this many chars: " << string_size << std::endl;
790  MPI_Send(const_cast<char*> (os.str().data()), string_size, MPI_CHAR, receiving_rank, OUTPUT_TAG, MPI_COMM_POOL);
791  PROF_STOP( basic::HIERARCHY_SEND_COORDS );
792  }
793 
// Convenience overload: send to the designated output rank
// (MPI_OUTPUT_RANK, i.e. rank 0 of MPI_COMM_POOL).
void
MPIHPool_RMSD::send_silent_struct_to_rank( core::io::silent::SilentFileData& send_ss, core::io::silent::SilentStructOP & ss, Address& ss_addr, core::Size& new_level ) {
	send_silent_struct_to_rank( send_ss, ss, ss_addr, new_level, MPI_OUTPUT_RANK );
}
798 
799 
800 
// Scan the gathered per-rank addresses (buf_.neighbor_addresses_) and
//   1) flag in buf_.is_a_neighbor_ every rank whose candidate address
//      falls in a branch cached on this node, counting them in
//      buf_.num_new_neighbors_; and
//   2) build the counts (buf_.int_buf1_) and displacements
//      (buf_.memory_offset_) arrays for the coordinate MPI_Allgatherv.
// A leading -1 in a rank's address slot means that rank discovered no
// new pose this round (see prepare_send_new_coords()).
void
MPIHPool_RMSD::scan_output_and_setup_to_receive(){ //determines the number of new neighbors
	buf_.num_new_neighbors_ = 0;
	PROF_START( basic::HIERARCHY_SETUP_TO_RECV );
	// resize() only default-fills NEW slots; safe here because the loop
	// below overwrites every entry.
	buf_.is_a_neighbor_.resize( pool_npes_, false );

	runtime_assert(buf_.is_a_neighbor_.size() == pool_npes_ );
	//convert to proper address
	Address nbr_address( nlevels_, 0 );

	for( unsigned int i = 0; i < pool_npes_; i++ ) {
		if( buf_.neighbor_addresses_[ (i * nlevels_ ) ] != -1 ) { //quick indication that no pose discovered this round
			for( core::Size ii = 1; ii <= nbr_address.size(); ii++ ) {
				nbr_address[ ii ] = buf_.neighbor_addresses_[ ( i * nlevels_ ) + ( ii - 1 ) ];
			}
			// Only ranks whose branch this node has cached are "neighbors"
			// this node will evaluate.
			if( hlevel_.address_exists_in_cache( nbr_address ) ) {
				buf_.is_a_neighbor_[ i + 1 ] = true; // 1-based vector1
				buf_.num_new_neighbors_++;
			} else {
				buf_.is_a_neighbor_[ i + 1 ] = false;
			}
		} else {
			buf_.is_a_neighbor_[ i + 1 ] = false;
		}
	}
	if( tr.visible() ) {
		tr.Debug << "my current query address: ";
		for( core::Size ii = 1; ii <= best_address_.size(); ii++ ) {
			tr.Debug << best_address_[ ii ] << " ";
		}
		tr.Debug << std::endl;

		hlevel_.debug_print_size_per_level();
		tr.Debug << "these are the addresses I will be examining: ";
		for( core::Size ii = 1; ii <= buf_.is_a_neighbor_.size(); ii++ ) {
			if( buf_.is_a_neighbor_[ ii ] == 1 ) {
				for( core::Size jj = 1; jj <= nbr_address.size(); jj++ ) {
					tr.Debug << buf_.neighbor_addresses_[ ( (ii-1) * nlevels_ ) + ( jj - 1 ) ] << " ";
				}
				tr.Debug << ", ";
			}
		}
		tr.Debug << std::endl;
	}

	core::Size index = 0;
	core::Size receive_counts = 0;
	//setup coords to receive stuff
	// Per-rank counts/displacements for MPI_Allgatherv: ranks whose first
	// address entry is positive contribute 3*nresidues_ doubles, others 0.
	for( core::Size ii = 0; ii < (pool_npes_ * nlevels_ ); ii+=nlevels_ ) {
		if ( buf_.neighbor_addresses_[ ii ] > 0 ) { //is it a valid address?
			buf_.int_buf1_[ index ] = ( nresidues_ * 3 ); //counts
			buf_.memory_offset_[ index ] = receive_counts; //displacement
			receive_counts += ( nresidues_ * 3 );
		}else{
			buf_.int_buf1_[ index ] = 0;
			buf_.memory_offset_[ index ] = 0;
		}
		index++;
	}
	PROF_STOP( basic::HIERARCHY_SETUP_TO_RECV );
}
862 
/// @brief Set the filename that newly discovered decoys are appended to
/// (see write_decoys_to_hierarchy); defaults to "discovered_decoys.out".
void
MPIHPool_RMSD::set_discovered_out( std::string const& newout){
	new_decoys_out_ = newout;
}
867 
/// @brief Filename to which newly discovered decoys are written.
std::string const&
MPIHPool_RMSD::get_discovered_out(){
	return new_decoys_out_;
}
872 
/// @brief Intentional no-op in this class (thresholds are per-level radii
/// here); only sanity-checks that the supplied value is non-negative.
void
MPIHPool_RMSD::set_transition_threshold( core::Real threshold ){
	//no need for this function in this class??
	runtime_assert( threshold >= 0 );
}
878 
/// @brief Set the residue count used to size per-structure coordinate
/// buffers (nresidues_ * 3 floats).
void
MPIHPool_RMSD::set_nresidues( core::Size nres ){
	nresidues_ = nres;
}
883 
/// @brief Residue count used for coordinate-buffer sizing.
core::Size
MPIHPool_RMSD::get_nresidues(){
	return nresidues_;
}
888 
/// @brief Delegate: write silent-file headers for `ss` into the hierarchy's files.
void
MPIHPool_RMSD::write_headers_to_hierarchy( core::io::silent::SilentStructOP& ss ) {
	hlevel_.write_headers_to_hierarchy( ss );
}
893 
/// @brief Persist a decoy into the on-disk hierarchy: the structure is
/// appended to the library file of every level from `new_level_begins` down
/// to its leaf address (files and directories are created on demand), and
/// finally appended to the flat discovered-decoys file (new_decoys_out_).
/// @param new_level_begins  first level at which the address is new;
///        0 means "no new level" and is treated as one past the last level.
void
MPIHPool_RMSD::write_decoys_to_hierarchy( core::io::silent::SilentFileData& sfd, core::io::silent::SilentStructOP& ss, Address& ss_addr, core::Size new_level_begins ) {
	using namespace basic;
	PROF_START( basic::WRITE_DECOYS_TO_HIERARCHY );
	Address tmp_addr = ss_addr;
	if( new_level_begins == 0 ) new_level_begins = tmp_addr.size() + 1;
	if( tr.visible() ) {
		tr.Debug << "writing decoy to hierarchy: " << ss->decoy_tag() << " ";
		for( core::Size ii =1; ii <= ss_addr.size(); ii++ ) {
			tr.Debug << ss_addr[ ii ] << " ";
		}
		tr.Debug << " new_level: " << new_level_begins << std::endl;
	}

	// zero out the levels at/below the new level; they are restored one per
	// loop iteration so each pass addresses one ancestor file.
	for( core::Size ii = new_level_begins; ii <= tmp_addr.size(); ii++ ) {
		tmp_addr[ ii ] = 0;
	}
	core::Size index = new_level_begins;
	do {
		std::string file_in_hierarchy = hlevel_.lib_full_path( tmp_addr );
		utility::file::FileName file( file_in_hierarchy );
		// first touch of this library file: create it and stamp the header
		if( !utility::file::file_exists( file_in_hierarchy ) ){
			utility::file::create_directory_recursive( file.path() );
			utility::file::create_blank_file( file.name() );
			std::ofstream os;
			os.open( (file.name()).c_str() );
			ss->print_header( os ); //temporary fix
			os.close();
		}
		if( tr.visible() ) {
			tr.Debug << index << " writing decoy " << ss->decoy_tag() << " to file: " << file_in_hierarchy << std::endl;
		}
		std::ofstream os;
		os.open( (file_in_hierarchy).c_str(), std::ios::app );
		sfd._write_silent_struct( *ss, os, false );
		os.close();
		// descend one level: restore the next component of the real address
		if( index <= ss_addr.size() ) {
			tmp_addr[ index ] = ss_addr[ index ];
		}
		//index++;
		// NOTE: index++ in the condition runs the body once more after
		// index passes ss_addr.size() (writes the fully-resolved leaf file)
	} while( index++ <= ss_addr.size() );
	sfd.write_silent_struct( *ss, new_decoys_out_, false );
	//tr.Debug << "done dumping structure into hierarchy " << std::endl;
	PROF_STOP( basic::WRITE_DECOYS_TO_HIERARCHY );
}
939 
/// @brief Delegate: cap the number of pools the hierarchy keeps in memory.
void
MPIHPool_RMSD::max_cache_size( core::Size max_cache ) {
	hlevel_.max_cache_size( max_cache );
}
944 
945  void
946  MPIHPool_RMSD::finalize(){
947  PROF_START( basic::FINALIZE );
948  MPI_Barrier( MPI_COMM_POOL );
949  current_trajectory_state_ = FINISHED;
950  tr.Info << "rank: " << pool_rank_ << " calling finalized on trajectory " << std::endl;
951  core::Size num_nodes_finished = any_node_finished();
952  if( tr.visible() ) {
953  tr.Debug << "dumping hierarchy (debug) " << std::endl;
954  hlevel_.debug_print_hierarchy();
955  }
956  if( num_nodes_finished > 0 ) {
957  tr.Info << "num nodes finished this round: " << num_nodes_finished << " of " << pool_npes_ << std::endl;
958  if( ( pool_npes_ - num_nodes_finished ) == 0 ) {
959  return;
960  }
961  update_comm( pool_npes_ - num_nodes_finished );
962  }
963  PROF_STOP( basic::FINALIZE );
964  }
965 
966 
/// @brief One-time setup of the hierarchical MPI pool: query world
/// rank/size, read the per-level clustering radii, carve a pool
/// communicator (MPI_COMM_POOL) out of the non-job-distributor ranks, and
/// either create the on-disk hierarchy (optionally seeding it from a silent
/// file) or load the existing top level from disk.
void
MPIHPool_RMSD::initialize(){
	using namespace core;
	using namespace basic::options;
	using namespace basic::options::OptionKeys;
	using namespace basic;

	tr.Info << "initializing hierarchical MPI class" << std::endl;
	//time setup
	clock_t start_setup = clock();
	rank_ = 0;
	npes_ = 0;

	PROF_START( basic::CHECK_COMM_SIZE );
	MPI_Comm_rank( MPI_COMM_WORLD, ( int* )( &rank_ ) );
	MPI_Comm_size( MPI_COMM_WORLD, ( int* )( &npes_ ) );
	PROF_STOP( basic::CHECK_COMM_SIZE );

	PROF_START( basic::INITIALIZE );
	pool_rank_ = rank_;
	pool_npes_ = npes_;
	//tr.Debug << "just checked rank: it's " << rank_ << " out of " << npes_ << std::endl;

	//initialize radii
	level_radii_ = option[ cluster::K_radius ]();
	runtime_assert( level_radii_.size() == nlevels_ );

	// jd2: handle to the job distributor (declared just above) -- must be
	// the MPIFileBufJobDistributor, whose min_client_rank() marks the first
	// rank that is a pool client rather than a distributor/file-buf rank.
	core::Size min_client_rank, new_size;
	if( jd2 ) {
		min_client_rank = jd2->min_client_rank();
		new_size = npes_ - min_client_rank;
	} else {
		utility_exit_with_message("cannot use MPIHPool_RMSD without using the MPIFileBufJobDistributor! try again!");
	}

	pool_size_ = Pool_RMSD::size();
	nresidues_ = Pool_RMSD::natom();
	current_address_.resize( nlevels_, 0 );

	PROF_STOP( basic::INITIALIZE );
	//create new MPI_COMM_WORLD based on sub-set of nodes
	tr.Info << "initializing comm" << std::endl;
	PROF_START( basic::MPICOMMCREATION );
	int index = 0;
	if( tr.visible() ) tr.Debug << "now trying to set up new communicator" << std::endl;
	buf_.setup( pool_npes_, nresidues_, nlevels_ );
	// collect the ranks [min_client_rank, npes_) that will form the pool
	for(unsigned int ii = min_client_rank; ii < npes_; ii++){
		(buf_.int_buf1_)[ index++ ] = ii;
		if( tr.visible() ) tr.Debug << "including " << ii << " in the new pool communicator " << std::endl;
	}

	//initialize all num_slave dependent buffers for MPI transfers
	MPI_Group pool_group, all;
	int returnval;

	returnval = MPI_Comm_group( MPI_COMM_WORLD, &all);
	if ( returnval != MPI_SUCCESS ) {
		utility_exit_with_message("failed in creating a new communicator!");
	}
	if( tr.visible() ) tr.Debug << "set up MPI_COMM_WORLD group" << std::endl;

	returnval = MPI_Group_incl( all, (new_size), buf_.int_buf1_, &pool_group );
	if ( returnval != MPI_SUCCESS ) {
		utility_exit_with_message("failed in creating a new communicator!");
	}
	if( tr.visible() )tr.Debug << "created the pool group" << std::endl;

	returnval = MPI_Comm_create( MPI_COMM_WORLD, pool_group, &MPI_COMM_POOL );
	if ( returnval != MPI_SUCCESS ) {
		utility_exit_with_message("failed in creating a new communicator!");
	}
	if( tr.visible() ) tr.Debug << "created the MPI_COMM_POOL communicator " << std::endl;

	// only pool members have a valid MPI_COMM_POOL; refresh their local ids
	if( rank_ >= min_client_rank ) {
		MPI_Comm_rank( MPI_COMM_POOL, ( int* )( &pool_rank_ ) );
		MPI_Comm_size( MPI_COMM_POOL, ( int* )( &pool_npes_ ) );
		if( tr.visible() ) tr.Debug << "new ranks from MPI_COMM_POOL: " << pool_rank_ << " " << pool_npes_ << std::endl;
	}

	PROF_STOP( basic::MPICOMMCREATION );
	if( tr.visible() ) tr.Debug << "finished initializing, setting up new comm, and ready to go!" << std::endl;
	PROF_START( basic::INITIALIZE_HIERARCHY );
	tracer_visible_ = tr.visible();

	// address of the top-level ("universal") pool: (1, 0, 0, ...)
	Address universal_address( nlevels_, 0 );
	universal_address[ 1 ] = 1;

	tr.Info << "checking for hierarchy..." << std::endl;
	if( tr.visible() ) tr.Debug << "does pool exist? " << hlevel_.lib_full_path( universal_address ) << " : " << hlevel_.pool_exists( universal_address ) << std::endl;
	if( !hlevel_.pool_exists( universal_address ) ) { //checks for directory assumed to contain hierarchy
		if( rank_ >= min_client_rank ) {
			MPI_Barrier( MPI_COMM_POOL );
			tr.Info << "hierarchical pool doesn't exist. creating hierarchy " << std::endl;
			//create a hierarchical pool to go along with this silent-file
			if( option[ mc::read_structures_into_pool ].user() ) {
				if( !utility::file::file_exists( option[ mc::read_structures_into_pool ]() ) ) {
					utility_exit_with_message("you specified a file for option mc::read_structures_into_pool that does not exist! try again!");
				} else {
					// sfd (SilentFileData) is declared in lines lost from this dump
					sfd.strict_column_mode( true );
					core::pose::Pose pose;
					bool successfully_read_file = false;
					core::Size max_attempts = 100;
					core::Size attempt = 0;
					// NOTE(review): 'attempt' is never incremented, so a
					// persistently failing read loops forever despite
					// max_attempts -- verify intent.
					while( !successfully_read_file && attempt < max_attempts ) {
						successfully_read_file = sfd.read_file( hlevel_.filename() );
					}
					if ( successfully_read_file ) {
						// seed the hierarchy: classify each seed structure and
						// assign fresh sub-pool indices at unresolved levels
						for( core::Size ii = 1; ii <= tags.size(); ii++ ) {
							ss = sfd[ tags[ ii ] ];
							std::string best_decoy;
							Address best_addr;
							core::Size new_level_starts_at = 0;
							hlevel_.evaluate( (*ss), best_decoy, best_rms, best_addr );
							for( core::Size jj = 1; jj <= best_addr.size(); jj++ ) {
								if( best_addr[ jj ] == 0 ) {
									if( new_level_starts_at == 0 ) new_level_starts_at = jj;
									best_addr[ jj ] = hlevel_.pool_size( best_addr, jj - 1 ) + 1;
								}
							}
							// only one rank writes to disk; the rest update in-memory only
							if( rank_ == min_client_rank ) {
								hlevel_.add_new( (*ss), tags[ ii ], best_addr, true, 2 );
							} else {
								hlevel_.add_new( (*ss), tags[ ii ], best_addr, false, 2 );
							}
						}
					} else {
						utility_exit_with_message("cannot read silent file. thus cannot create hierarchy! filename: " + hlevel_.filename());
					}
				}
			}
		}
	} else {
		if( rank_ >= min_client_rank ) {
			tr.Info << "pool already exists!! loading file as top-level" << std::endl;
			Pool_RMSD_OP top_level_pool = new Pool_RMSD( hlevel_.lib_full_path( universal_address ) );
			if( tr.visible() ) tr.Debug << "finished reading pool from file: " << hlevel_.lib_full_path( universal_address ) << std::endl;
			hlevel_.fill_top_level( top_level_pool );
		}
		if( rank_ >= min_client_rank ) {
			MPI_Barrier( MPI_COMM_POOL );
		}
	}
	tr.Info << "this rank: " << pool_rank_ << " starting out with " << hlevel_.top_level_pool_size() << " structures in the top-most-level of hierarchy" << std::endl;
	if( tr.visible() ) {
		hlevel_.debug_print_size_per_level();
		tr.Debug << "END PRINTING SIZE PER LEVEL FOR INITIATION" << std::endl;
	}
	clock_t finish_setup = clock();
	double time_to_setup = ( double(finish_setup) - double(start_setup) ) / CLOCKS_PER_SEC;
	tr << "time to setup " << pool_npes_ << " nodes: " << time_to_setup << std::endl;
	PROF_STOP( basic::INITIALIZE_HIERARCHY );
}
1126 
1127  bool
1128  MPIHPool_RMSD::is_in_neighborhood( Address & q_address, Address & ref_address ) {
1129 
1130  runtime_assert( q_address.size() == ref_address.size() );
1131 
1132  for( core::Size ii = 1; ii <= ref_address.size(); ii++ ) {
1133  if( q_address[ ii ] != 0 && ref_address[ ii ] != 0 && //either is un-resolved
1134  q_address[ ii ] != ref_address[ ii ] ) {
1135  return false;
1136  }
1137  }
1138  return true;
1139  }
1140 
1141  void
1142  MPIHPool_RMSD::address_to_string( Address & address_buf, core::Size, std::string & address_tag ) {
1143  address_tag = "";
1144  std::ostringstream q;
1145  q.width(5);
1146  q.fill('0');
1147  for( core::Size ii = 1; ii <= address_buf.size(); ii++) {
1148  q << address_buf[ ii ];
1149  address_tag += q.str() + ".";
1150  }
1151  //tr.Debug << "resulting address from address_to_string is: " << address_tag << std::endl;
1152  }
1153 
/// @brief Parse a dot-separated decoy tag (e.g. "c.00001.00002.7") back into
/// an Address: the prefix before the first '.' is discarded, then up to
/// nlevels_ zero-padded fields are read into address_buf starting at
/// `index`; any levels not present in the tag are zero-filled.
void
MPIHPool_RMSD::string_to_address( Address & address_buf, core::Size index, std::string & address_tag ){
	//erase prefix, should be something like "c." or "new."
	core::Size pos = 0, newpos;
	std::string subtag="";
	core::Size prefix_pos = address_tag.find('.',0);
	address_tag.erase(0,prefix_pos);
	core::Size counted_levels = 0;
	while( counted_levels < nlevels_ ) {
		newpos = address_tag.find( '.', pos+1 );
		// NOTE(review): std::string::find returns npos (not length()) when
		// no '.' remains, so this break presumably never fires; the loop
		// instead relies on the nlevels_ bound and on substr() clamping --
		// verify against real tag formats before changing.
		if( newpos == address_tag.length() ) {
			break; // no more "."
		}

		subtag = address_tag.substr( pos + 1, (newpos-pos - 1) );

		pos = newpos;
		// strip leading zeros so atoi sees the bare number ("00012" -> "12")
		core::Size first_nonzero_pos = subtag.find_first_not_of('0',0);
		if (first_nonzero_pos > 0 ) {
			subtag.erase(0,first_nonzero_pos);
		}
		address_buf[ index + counted_levels ] = atoi(subtag.c_str());
		counted_levels++;
	}
	// zero-fill any levels the tag did not specify
	while( counted_levels < nlevels_ ){
		address_buf[ index + counted_levels ] = 0;
		counted_levels++;
	}

}
1184 
1185  void
1186  MPIHPool_RMSD::assign_tag( Address& address_tag, core::Size id_num, std::string & newtag ){
1187  std::ostringstream q;
1188  for( core::Size ii = 1; ii <= address_tag.size(); ii++ ) {
1189  if( address_tag[ ii ] == 0 ) {
1190  core::Size prev_level = ii - 1;
1191  q << hlevel_.pool_size( address_tag, (prev_level) ) + 1 << ".";
1192  if( tr.visible() ) tr.Debug << " at level " << ii << " assigning " << ( hlevel_.pool_size( address_tag, ii - 1 ) + 1 ) << std::endl;
1193  } else {
1194  q << address_tag[ ii ] << ".";
1195  }
1196  }
1197  q << id_num;
1198  newtag = "new." + q.str();
1199  //tr.Debug << " id_num: " << id_num << " newtag: " << newtag << " address: ";
1200  //for( core::Size ii = 1; ii <= address_tag.size(); ii++ ) {
1201  //tr.Debug << address_tag[ ii ] << " ";
1202  //}
1203  //tr.Debug << std::endl;
1204 
1205  }
1206 
1207  void
1208  MPIHPool_RMSD::assign_tag( std::string const& address_tag, core::Size assigned_id_num, std::string& newtag ){
1209  std::ostringstream q;
1210  q << assigned_id_num;
1211  newtag = "new." + address_tag + "." + q.str();
1212  //tr.Debug << "from address: " << address_tag << " id: " << assigned_id_num << " produced: " << newtag << std::endl;
1213  }
1214 
1215 
1216  void MPIHPool_RMSD::create_comm( int* ranks_to_include, int new_size ){
1217  int returnval;
1218  MPI_Group new_pool_group, old_pool_group;
1219  MPI_Comm dup_pool_comm;
1220  PROF_START( basic::MPICOMMCREATION );
1221  MPI_Comm_dup( MPI_COMM_POOL, &dup_pool_comm );
1222  returnval = MPI_Comm_group( dup_pool_comm, &old_pool_group );
1223  assert(returnval == MPI_SUCCESS );
1224 
1225  returnval = MPI_Group_incl( old_pool_group, new_size, ranks_to_include, &new_pool_group );
1226  assert(returnval == MPI_SUCCESS );
1227 
1228  returnval = MPI_Comm_create( dup_pool_comm, new_pool_group, &MPI_COMM_POOL );
1229  assert(returnval == MPI_SUCCESS );
1230  PROF_STOP( basic::MPICOMMCREATION );
1231 
1232  }
1233 
1234 
1235 #endif //useMPI?
1236 }
1237 }
1238 }