Rosetta 3.5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
ParallelTempering.cc
Go to the documentation of this file.
1 // -*- mode:c++;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
2 // vi: set ts=2 noet:
3 //
4 // (c) Copyright Rosetta Commons Member Institutions.
5 // (c) This file is part of the Rosetta software suite and is made available under license.
6 // (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
7 // (c) For more information, see http://www.rosettacommons.org. Questions about this can be
8 // (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
9 
10 /// @file protocols/canonical_sampling/ParallelTemperingMover.cc
11 /// @brief ParallelTempering methods implemented
12 /// @author
13 
14 
15 // Unit Headers
18 
19 
20 // protocols headers
23 #include <protocols/moves/Mover.hh>
27 
29 
30 //#include <protocols/jd2/JobDistributor.hh>
31 #include <protocols/jd2/util.hh>
32 #include <protocols/jd2/Job.hh>
33 
34 // core headers
35 #include <basic/options/option_macros.hh>
36 #include <basic/options/keys/in.OptionKeys.gen.hh>
37 #include <basic/options/keys/packing.OptionKeys.gen.hh>
38 
39 #include <basic/Tracer.hh>
40 
43 
45 #include <core/types.hh>
46 
47 // numeric headers
48 #include <numeric/random/random.hh>
49 
50 // utility headers
51 #include <utility/file/file_sys_util.hh>
52 #include <utility/pointer/owning_ptr.hh>
53 #include <utility/tag/Tag.hh>
54 #include <utility/io/ozstream.hh>
55 #include <utility/io/izstream.hh>
56 #include <ObjexxFCL/string.functions.hh>
57 
58 // basic headers
59 #include <basic/prof.hh>
60 
61 // C++ Headers
62 #include <cmath>
63 
64 
65 using basic::T;
66 using basic::Error;
67 using basic::Warning;
68 
// File-local tracer for this mover's diagnostic output.
69 static basic::Tracer tr( "protocols.canonical_sampling.ParallelTempering" );
// File-local RNG with a fixed seed key, used below for the Metropolis
// acceptance test in shuffle_temperatures().
70 static numeric::random::RandomGenerator RG(3227547);
71 
72 
74 
75 //Mike: when you want to remove these Macros... leave them at least here as comment - since they provide documentation
// NOTE(review): this is the tail of the one-time options-registration guard;
// the signature line and the OPT macro invocations (rendered lines 76 and 79)
// were lost in this extract — recover them from the original file.
77  if ( !options_registered_ ) {
78  options_registered_ = true;
80  }
81 }
82 
83 namespace protocols {
84 namespace canonical_sampling {
85 using namespace core;
86 
90 }
91 
94  return new ParallelTempering;
95 }
96 
99  return "ParallelTempering";
100 }
101 
// Default constructor (signature line lost in this extract).
// Buffers start unallocated (NULL) and rank_ is -1 until an MPI
// communicator is attached via set_mpi_comm().
103  rank_( -1 ),
104  last_energies_( NULL ),
105  rank2tlevel_( NULL ),
106  tlevel2rank_( NULL ),
107  start_time_(0),
108  total_mpi_wait_time_(0)
109 {
110 #ifndef USEMPI
// Fail fast: parallel tempering needs one MPI rank per temperature level,
// so a non-MPI build cannot run this mover at all.
111  utility_exit_with_message( "ParallelTempering requires MPI build" );
112 #endif
113 #ifdef USEMPI
114  mpi_comm_ = MPI_COMM_NULL;
115 #endif
116  set_defaults();
117 }
118 
// Copy constructor (signature line lost in this extract).
// The per-level buffers are intentionally NOT copied — they are reset to
// NULL here and re-created later (see operator= / initialize_simulation,
// which call allocate_buffers()).
120  Parent( other ),
121  exchange_schedules_( other.exchange_schedules_ ),
122  last_exchange_schedule_( other.last_exchange_schedule_ ),
123  last_energies_( NULL ),
124  rank2tlevel_( NULL ),
125  tlevel2rank_( NULL ),
126  start_time_( other.start_time_ ),
127  total_mpi_wait_time_( other.total_mpi_wait_time_ )
128 {
129 #ifndef USEMPI
130  utility_exit_with_message( "ParallelTempering requires MPI build" );
131 #endif
132 #ifdef USEMPI
// Duplicates the other instance's communicator (sets rank_ as a side effect).
133  set_mpi_comm( other.mpi_comm() );
134 #endif
135 }
136 
// Assignment operator (signature line lost in this extract).
// Self-assignment guard, then copies base state, re-duplicates the MPI
// communicator and rebuilds the per-level buffers.
138  if ( &other == this ) return *this;
140  Parent::operator=( other );
141 #ifdef USEMPI
142  set_mpi_comm( other.mpi_comm() );
143 #endif
144  Size const nlevels( n_temp_levels() );
// NOTE(review): allocate_buffers() as shown below (rendered line 250 missing)
// overwrites the buffer pointers with fresh new[] allocations; verify in the
// full source that existing buffers are freed first, else repeated assignment
// would leak.
145  allocate_buffers( nlevels );
// Sanity check: the exchange schedule built for this object must match the
// temperature-level count copied from `other`.
146  runtime_assert( exchange_schedules_[ 0 ].size() == nlevels );
147  return *this;
148 }
149 
152 }
153 
154 
// Prepare the simulation: after base-class setup, attach the job
// distributor's MPI communicator, size the exchange buffers to the number
// of temperature levels, build the two alternating exchange schedules and
// start the wall-clock timer used for MPI-wait accounting.
155 void
157  pose::Pose & pose,
158  protocols::canonical_sampling::MetropolisHastingsMover const & metropolis_hastings_mover,
159  core::Size cycle //default=0; non-zero if trajectory is restarted
160 ) {
161  tr.Trace << "ParallelTempering::initialize_simul1... " << std::endl;
162  Parent::initialize_simulation(pose, metropolis_hastings_mover,cycle);
163  tr.Trace << "ParallelTempering::initialize_simul2... " << std::endl;
164 #ifdef USEMPI
// One MPI rank per temperature level; set_mpi_comm duplicates the
// communicator and asserts the size matches n_temp_levels().
165  set_mpi_comm( jd2::current_mpi_comm() );
166 #endif
167  tr.Trace << "ParallelTempering::initialize_simul3... " << std::endl;
168  Size const nlevels( n_temp_levels() );
169  allocate_buffers( nlevels );
170  setup_exchange_schedule( nlevels );
172 
// SHRINK_FACTOR keeps the clock() tick count small enough to avoid
// overflow when accumulated over long runs.
174  start_time_ = clock() / basic::SHRINK_FACTOR;
176  tr.Trace << "Initialized ParallelTempering! " << std::endl;
177 }
178 
// Wrap up the simulation: rank 0 prints the per-pair exchange acceptance
// statistics, then every rank reports how much time it spent blocked in
// MPI collectives during temperature exchanges.
179 void
181  pose::Pose& pose,
183 ) {
185  Parent::finalize_simulation( pose, mhm );
186 
187  if (rank() == 0) {
188  tr << "Temperature Exchange Frequencies:" << std::endl;
// Save the tracer's stream formatting state so it can be restored below.
189  std::streamsize oldwidth( tr.width() );
190  std::streamsize oldprecision( tr.precision() );
191  std::ios_base::fmtflags oldflags( tr.flags() );
192  tr.width(5);
193  tr.precision(3);
194  tr.setf(std::ios_base::fixed);
195  for (core::Size i=0; i<n_temp_levels()-1; ++i) {
196  std::pair<int, int> elem(i, i+1);
197  //Original code (line below) fails on some versions of GCC (4.4.3 and 4.3.4 reported by users)
198  //core::Real frequency(core::Real(exchange_accepts_[elem])/core::Real(exchange_attempts_[elem]));
199  //this replacement code is reported to work for those compilers
200  core::Real const a = exchange_accepts_[elem];
// NOTE(review): exchange_attempts_ starts at 0 (see setup_exchange_schedule);
// if a pair was never attempted, b == 0 and frequency prints as inf/NaN.
201  core::Real const b = exchange_attempts_[elem];
202  core::Real const frequency(a/b);
203  tr << temperature(i+1) << " <-> " << temperature(i+2) << ": " << frequency
204  << " (" << exchange_accepts_[elem] << " of " << exchange_attempts_[elem] << ")" << std::endl;
205  }
// Restore the formatting state saved above.
206  tr.width(oldwidth);
207  tr.precision(oldprecision);
208  tr.flags(oldflags);
209  }
210 
// Convert SHRINK_FACTOR-scaled clock ticks back to seconds.
211  core::Real const clock_factor( ( (double) basic::SHRINK_FACTOR ) / CLOCKS_PER_SEC );
212  clock_t total_time(clock()/basic::SHRINK_FACTOR - start_time_);
213  core::Real fraction_waiting = total_mpi_wait_time_*clock_factor / ( total_time*clock_factor );
214  tr << "Spent " << fraction_waiting*100 << "% time waiting for MPI temperature exchange ("
215  << total_mpi_wait_time_*clock_factor << " seconds out of " << total_time*clock_factor << " total)" << std::endl;
216 }
217 
// Build the two alternating even/odd exchange schedules (signature line
// lost in this extract). Schedule 0 pairs levels 0<->1, 2<->3, ...;
// schedule 1 pairs 1<->2, 3<->4, ... — alternating between them lets every
// neighboring pair exchange over two rounds. Rank 0 also zeroes the
// attempt/accept counters it later reports in finalize_simulation().
219  tr.Trace << "ParallelTempering::setup_exchange_schedule for " << nlevels << std::endl;
220  exchange_schedules_.clear();
221  ExchangeSchedule list;
222 
223  //0<->1, 2<->3...
224  list.clear();
225  for (int i=0; i < (int)nlevels-1; i+=2) {
226  std::pair<int, int> elem(i, i+1);
227  list.push_back(elem);
// Only rank 0 bookkeeps exchange statistics.
228  if (rank() == 0) {
229  exchange_attempts_[elem] = 0;
230  exchange_accepts_[elem] = 0;
231  }
232  }
233  exchange_schedules_.push_back(list);
234 
235  //1<->2, 3<->4...
236  list.clear();
237  for (int i=1; i<(int)nlevels-1; i+=2) {
238  std::pair<int, int> elem(i, i+1);
239  list.push_back(elem);
240  if (rank() == 0) {
241  exchange_attempts_[elem] = 0;
242  exchange_accepts_[elem] = 0;
243  }
244  }
245  exchange_schedules_.push_back(list);
246 }
247 
// Allocate the per-level working arrays (signature line lost in this
// extract; rendered line 250 is also missing — verify in the full source
// whether it frees any previous buffers before the new[] calls below).
// Initial mapping is the identity: rank i holds temperature level i+1.
249  tr.Trace << "ParallelTempering::allocate_buffers for " << nlevels << std::endl;
251  last_energies_ = new double[nlevels];
252  rank2tlevel_ = new int[nlevels];
253  tlevel2rank_ = new int[nlevels];
254  for ( Size i=0; i<nlevels; ++i ) {
255  rank2tlevel_[ i ] = i+1; //tlevels enumerate 1..N
256  tlevel2rank_[ i ] = i; //ranks enumerate 0...N-1
257  }
258 }
259 
// Release the per-level working arrays (signature line lost in this
// extract). The three arrays are allocated together in allocate_buffers(),
// so last_energies_ being non-NULL implies the other two are as well.
// Pointers are reset to NULL so a second call is a safe no-op.
261  if ( last_energies_ ) {
262  delete [] last_energies_;
263  delete [] rank2tlevel_;
264  delete [] tlevel2rank_;
265  }
266  last_energies_ = NULL;
267  rank2tlevel_ = NULL;
268  tlevel2rank_ = NULL;
269 }
270 
// Attempt a temperature exchange (signature lines lost in this extract).
// All ranks gather their current score at rank 0, rank 0 decides the swaps
// (the shuffle call, rendered line 284, is missing from this extract), and
// the new temperature level for each rank is scattered back. Time spent in
// the two blocking collectives is accumulated in total_mpi_wait_time_.
274  if ( !time_for_temp_move() ) return temperature();
275 
276  // Size const nlevels( n_temp_levels() );
277 #ifdef USEMPI
278  //get information
279  double last_energy = score;
280  clock_t time_before_MPI = clock();
// Rank 0 receives every rank's energy into last_energies_.
281  MPI_Gather(&last_energy, 1, MPI_DOUBLE, last_energies_, 1, MPI_DOUBLE, 0, mpi_comm() );
282 
283  //change the T_tag and T_rev at node0
285 
286  //public the new T_tag
287  int new_tlevel;
// Each rank learns its (possibly swapped) temperature level from rank 0.
288  MPI_Scatter(rank2tlevel_, 1, MPI_INT, &new_tlevel, 1, MPI_INT, 0, mpi_comm() );
289  total_mpi_wait_time_ += ( clock() - time_before_MPI ) / basic::SHRINK_FACTOR;
290  set_current_temp( new_tlevel );
291 #endif
292  return temperature();
293 }
294 
// Rank-0-only Metropolis exchange over one schedule (signature/parameter
// lines lost in this extract; `ex` is the active exchange schedule and
// `energies` the gathered per-rank scores). For each scheduled pair the
// standard parallel-tempering criterion
//   accept with prob min(1, exp(-(1/T1 - 1/T2) * (E2 - E1)))
// is applied; accepted swaps update both the rank->level and level->rank
// maps and the acceptance counters reported at the end of the run.
295 void
299 
// NOTE(review): the loop runs i = 1..ex.size(), so ex is presumably a
// 1-based utility::vector1 — confirm against the header.
300  for ( Size i=1; i<=ex.size(); i++ ) {
301  Size const rank1=tlevel2rank_[ex[i].first];
302  Size const rank2=tlevel2rank_[ex[i].second];
303  Real const invT1( 1.0 / temperature( rank2tlevel_[rank1] ) );
304  Real const invT2( 1.0 / temperature( rank2tlevel_[rank2] ) );
305  Real const deltaE( energies[rank2]-energies[rank1] );
306  Real const delta( ( invT1 - invT2 ) * deltaE );
307 
308  ++exchange_attempts_[ex[i]];
309 
// The max(-40, .) clamp keeps exp() from underflowing; min(1, .) caps the
// acceptance probability per the Metropolis criterion.
310  if ( RG.uniform() < std::min( 1.0, std::exp( std::max(-40.0, -delta) ) ) ) {
311  Size tmp;
312 
313  //Swap tlevel
314  tmp=rank2tlevel_[rank1];
315  rank2tlevel_[rank1]=rank2tlevel_[rank2];
316  rank2tlevel_[rank2]=tmp;
317 
318  //Swap ranks
319  tmp=tlevel2rank_[ex[i].first];
320  tlevel2rank_[ex[i].first]=tlevel2rank_[ex[i].second];
321  tlevel2rank_[ex[i].second]=tmp;
322 
323  ++exchange_accepts_[ex[i]];
324  }
325  }
326 }
327 
330 {
331  return "ParallelTempering";
332 }
333 
336 {
338 }
339 
342 {
343  return new ParallelTempering;
344 }
345 
// RosettaScripts hook (the function-name line was lost in this extract).
// This mover adds no options of its own here; all tag parsing is delegated
// to the parent tempering class.
346 void
348  utility::tag::TagPtr const tag,
350  protocols::filters::Filters_map const & filters,
351  protocols::moves::Movers_map const & movers,
352  pose::Pose const & pose
353 ) {
354  Parent::parse_my_tag( tag, data, filters, movers, pose );
355 }
356 
357 
358 /// handling of options including command-line
360 }
361 
362 /// @brief Assigns user specified values to primitive members using command line options
// (Signature line lost in this extract; body delegates to the parent via
// the call on the missing rendered line 370.)
// NOTE(review): the three using-directives below were duplicated verbatim
// (rendered lines 367-369 repeated 364-366); the redundant copies are
// removed — using-directives are idempotent, so lookup is unchanged.
364  using namespace basic::options;
365  using namespace basic::options::OptionKeys;
366  using namespace core;
371 }
372 
373 #ifdef USEMPI
// Attach an MPI communicator to this mover. A non-NULL communicator is
// duplicated (so this mover owns an independent handle) and rank_ is set;
// the duplicate must contain exactly one rank per temperature level.
// A NULL communicator simply detaches the mover.
374 void ParallelTempering::set_mpi_comm( MPI_Comm const& mpi_comm ) {
375  if ( mpi_comm != MPI_COMM_NULL ) {
376  tr.Trace << "ParallelTempering::Duplicate mpi-communicator" << std::endl;
377  MPI_Comm_dup( mpi_comm, &mpi_comm_ );
378  MPI_Comm_rank( mpi_comm_, &rank_ );
379  int size;
380  MPI_Comm_size( mpi_comm_, &size );
// One MPI rank per temperature level is a hard requirement.
381  runtime_assert( size == n_temp_levels() );
382  } else {
// Fixed copy-paste bug: this branch previously logged the same
// "Duplicate mpi-communicator" message as the branch above, although it
// duplicates nothing — it only clears the communicator.
383  tr.Trace << "ParallelTempering::set_mpi_comm: NULL communicator, nothing to duplicate" << std::endl;
384  mpi_comm_ = MPI_COMM_NULL;
385  }
386 }
387 #endif
388 
389 } //moves
390 } //protocols
391