Rosetta 3.5
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor.cc
// -*- mode:c++;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
// vi: set ts=2 noet:
//
// (c) Copyright Rosetta Commons Member Institutions.
// (c) This file is part of the Rosetta software suite and is made available under license.
// (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
// (c) For more information, see http://www.rosettacommons.org. Questions about this can be
// (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.

/// @file protocols/UnfoldedStateEnergyCalculator/UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor.cc
/// @brief Job distributor for UnfoldedStateEnergyCalculator
/// @author P. Douglas Renfrew (renfrew@unc.edu)

// MPI headers
#ifdef USEMPI
#include <mpi.h> // keep this first
#endif

// Unit headers
#include <protocols/unfolded_state_energy_calculator/UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor.hh>
#ifdef USEMPI
#include <protocols/unfolded_state_energy_calculator/UnfoldedStateEnergyCalculatorUtil.hh> // declares calc_all_averages() used in master_go()
#endif

// Package headers
// AUTO-REMOVED #include <protocols/moves/Mover.hh>

// Project headers
#include <core/scoring/EnergyMap.hh> // EMapVector, ScoreType, n_score_types

// Utility headers
#include <basic/Tracer.hh>
// AUTO-REMOVED #include <basic/options/option.hh>
#ifdef USEMPI
#include <utility/exit.hh>
#endif
#include <utility/assert.hh>

// C++ headers
#include <string>

#include <utility/vector1.hh>
#include <basic/options/keys/OptionKeys.hh>


static basic::Tracer TR("protocols.UnfoldedStateEnergyCalculator.UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor");

namespace protocols {
namespace unfolded_state_energy_calculator {

using namespace basic::options;
using namespace basic::options::OptionKeys;

///@brief ctor
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor() :
	MPIWorkPoolJobDistributor()
{}

///@brief dtor (don't put anything in here)
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::~UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor()
{}

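// In brief, the message exchange implemented below: slave nodes request work with
// NEW_JOB_ID_TAG, report jobs that failed on bad input with BAD_INPUT_TAG, and report
// completed jobs with JOB_SUCCESS_TAG (a second handshake on the same tag blocks until the
// slave has finished writing output). Unfolded-state results arrive with
// UNFOLDED_ENERGY_DATA_TAG as a LENGTH_TLC character buffer (the residue's three-letter
// code) followed by a fixed-size double array with one slot per ScoreType; the set of
// active score terms arrives with UNFOLDED_ENERGY_TERMS_TAG as the same style of double array.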
///@brief unfortunately this is pretty much copied from the MPIWorkPoolJobDistributor, I should make that more compartmentalized
void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::master_go( protocols::moves::MoverOP /*mover*/ )
{
#ifdef USEMPI

	using namespace core;
	using namespace core::scoring;
	using namespace protocols::jd2;

	runtime_assert( rank_ == 0 );

	// local arrays to store incoming messages
	int slave_data( 0 );
	double slave_data_vector[ n_score_types ];
	EMapVector slave_data_emap;
	char slave_tlc_vector[ LENGTH_TLC ];
	std::string slave_tlc_string;


	MPI_Status status;

	// set first job to assign
	master_get_new_job_id();

	// Job Distribution Loop
	while ( next_job_to_assign_ != 0 ) {
		TR << "Master Node: Waiting for job requests..." << std::endl;
		MPI_Recv( &slave_data, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		TR << "Master Node: Received message from " << status.MPI_SOURCE << " with tag " << status.MPI_TAG << std::endl;

		// decide what to do based on message tag
		switch ( status.MPI_TAG ) {
		case NEW_JOB_ID_TAG:
			TR << "Master Node: Sending new job id " << next_job_to_assign_ << " to node " << status.MPI_SOURCE << " with tag " << NEW_JOB_ID_TAG << std::endl;
			MPI_Send( &next_job_to_assign_, 1, MPI_INT, status.MPI_SOURCE, NEW_JOB_ID_TAG, MPI_COMM_WORLD );
			master_get_new_job_id();
			break;
		case BAD_INPUT_TAG:
			TR << "Master Node: Received job failure message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			bad_job_id_ = slave_data;
			master_remove_bad_inputs_from_job_list();
			break;
		case JOB_SUCCESS_TAG:
			TR << "Master Node: Received job success message for job id " << slave_data << " from node " << status.MPI_SOURCE << " blocking till output is done " << std::endl;
			MPI_Send( &next_job_to_assign_, 1, MPI_INT, status.MPI_SOURCE, JOB_SUCCESS_TAG, MPI_COMM_WORLD );
			MPI_Recv( &slave_data, 1, MPI_INT, status.MPI_SOURCE, JOB_SUCCESS_TAG, MPI_COMM_WORLD, &status );
			TR << "Master Node: Received job output finish message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		case UNFOLDED_ENERGY_DATA_TAG:
			TR << "Master Node: Received unfolded energy data message for job id " << slave_data << " from node " << status.MPI_SOURCE << " waiting for data " << std::endl;
			// clear local data
			slave_data_emap.clear(); slave_tlc_string.clear();

			// send response to send data
			MPI_Send( &slave_data, 1, MPI_INT, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD );

			// get vector of tlc data
			MPI_Recv( &slave_tlc_vector, LENGTH_TLC, MPI_CHAR, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD, &status );

			// convert to string
			for ( Size i( 0 ); i < LENGTH_TLC; ++i ) {
				slave_tlc_string += slave_tlc_vector[ i ];
			}

			// get vector of energy data
			MPI_Recv( &slave_data_vector, n_score_types, MPI_DOUBLE, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD, &status );

			// convert to emap (ScoreType enums are 1-based, the C array is 0-based)
			for ( Size i( 1 ); i <= n_score_types; ++i ) {
				slave_data_emap[ ScoreType( i ) ] = slave_data_vector[ i - 1 ];
			}

			// push into vector
			master_add_unfolded_energy_data( slave_tlc_string, slave_data_emap );
			TR << "Master Node: Finished receiving energy data message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		case UNFOLDED_ENERGY_TERMS_TAG:
			TR << "Master Node: Received unfolded energy terms message for job id " << slave_data << " from node " << status.MPI_SOURCE << " waiting for data " << std::endl;
			// clear emap
			slave_data_emap.clear();

			// send response to send data
			MPI_Send( &slave_data, 1, MPI_INT, status.MPI_SOURCE, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD );

			// get vector of data
			MPI_Recv( &slave_data_vector, n_score_types, MPI_DOUBLE, status.MPI_SOURCE, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD, &status );

			// convert to emap
			for ( Size i( 1 ); i <= n_score_types; ++i ) {
				slave_data_emap[ ScoreType( i ) ] = slave_data_vector[ i - 1 ];
			}

			// push into vector
			master_set_energy_terms( slave_data_emap );
			TR << "Master Node: Finished receiving energy terms message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		}
	}
	TR << "Master Node: Finished handing out jobs" << std::endl;

	core::Size n_nodes_left_to_spin_down( npes_ - 1 ); // don't have to spin down self

	// Node Spin Down loop
	while ( n_nodes_left_to_spin_down > 0 ) {
		TR << "Master Node: Waiting for " << n_nodes_left_to_spin_down << " slaves to finish jobs" << std::endl;
		MPI_Recv( &slave_data, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		TR << "Master Node: Received message from " << status.MPI_SOURCE << " with tag " << status.MPI_TAG << std::endl;

		// decide what to do based on message tag
		switch ( status.MPI_TAG ) {
		case NEW_JOB_ID_TAG:
			TR << "Master Node: Sending spin down signal to node " << status.MPI_SOURCE << std::endl;
			MPI_Send( &next_job_to_assign_, 1, MPI_INT, status.MPI_SOURCE, NEW_JOB_ID_TAG, MPI_COMM_WORLD );
			n_nodes_left_to_spin_down--;
			break;
		case BAD_INPUT_TAG:
			break;
		case JOB_SUCCESS_TAG:
			TR << "Master Node: Received job success message for job id " << slave_data << " from node " << status.MPI_SOURCE << " blocking till output is done " << std::endl;
			MPI_Send( &next_job_to_assign_, 1, MPI_INT, status.MPI_SOURCE, JOB_SUCCESS_TAG, MPI_COMM_WORLD );
			MPI_Recv( &slave_data, 1, MPI_INT, status.MPI_SOURCE, JOB_SUCCESS_TAG, MPI_COMM_WORLD, &status );
			TR << "Master Node: Received job output finish message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		case UNFOLDED_ENERGY_DATA_TAG:
			TR << "Master Node: Received unfolded energy data message for job id " << slave_data << " from node " << status.MPI_SOURCE << " waiting for data " << std::endl;
			// clear local data
			slave_data_emap.clear(); slave_tlc_string.clear();

			// send response to send data
			MPI_Send( &slave_data, 1, MPI_INT, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD );

			// get vector of tlc data
			MPI_Recv( &slave_tlc_vector, LENGTH_TLC, MPI_CHAR, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD, &status );

			// convert to string
			for ( Size i( 0 ); i < LENGTH_TLC; ++i ) {
				slave_tlc_string += slave_tlc_vector[ i ];
			}

			// get vector of data
			MPI_Recv( &slave_data_vector, n_score_types, MPI_DOUBLE, status.MPI_SOURCE, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD, &status );

			// convert to emap
			for ( Size i( 1 ); i <= n_score_types; ++i ) {
				slave_data_emap[ ScoreType( i ) ] = slave_data_vector[ i - 1 ];
			}

			// push into vector
			master_add_unfolded_energy_data( slave_tlc_string, slave_data_emap );
			TR << "Master Node: Finished receiving energy data message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		case UNFOLDED_ENERGY_TERMS_TAG:
			TR << "Master Node: Received unfolded energy terms message for job id " << slave_data << " from node " << status.MPI_SOURCE << " waiting for data " << std::endl;
			// clear emap
			slave_data_emap.clear();

			// send response to send data
			MPI_Send( &slave_data, 1, MPI_INT, status.MPI_SOURCE, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD );

			// get vector of data
			MPI_Recv( &slave_data_vector, n_score_types, MPI_DOUBLE, status.MPI_SOURCE, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD, &status );

			// convert to emap
			for ( Size i( 1 ); i <= n_score_types; ++i ) {
				slave_data_emap[ ScoreType( i ) ] = slave_data_vector[ i - 1 ];
			}

			// push into vector
			master_set_energy_terms( slave_data_emap );
			TR << "Master Node: Finished receiving energy terms message for job id " << slave_data << " from node " << status.MPI_SOURCE << std::endl;
			break;
		}
	}
	TR << "Master Node: Finished sending spin down signals to slaves" << std::endl;

	// calc average unweighted energies for all amino acids in the map
	for ( uem_iter i( unweighted_energies_map_.begin() ), e( unweighted_energies_map_.end() ); i != e; ++i ) {
		TR << "Calculating averages for " << i->first << std::endl;
		calc_all_averages( i->second, energy_terms_ );
	}

#endif
}

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::add_unfolded_energy_data( std::string tlc, core::scoring::EMapVector const & scores )
{
	if ( rank_ == 0 ) {
		master_add_unfolded_energy_data( tlc, scores );
	} else {
		slave_add_unfolded_energy_data( tlc, scores );
	}
}
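// A hypothetical call-site sketch (variable names are illustrative; the real caller is the
// protocol run under this distributor): after scoring a residue with an unweighted score
// function, it would hand the per-residue energies and the residue's three-letter code to
// the distributor, which routes them to the master- or slave-side implementation below.
//
//   core::scoring::EMapVector const & unweighted_scores( pose.energies().residue_total_energies( resnum ) );
//   job_dist.add_unfolded_energy_data( pose.residue( resnum ).name3(), unweighted_scores );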

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::master_add_unfolded_energy_data( std::string tlc, core::scoring::EMapVector const & scores )
{
	unweighted_energies_map_[ tlc ].push_back( scores );
}

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::slave_add_unfolded_energy_data( std::string tlc, core::scoring::EMapVector const & scores )
{
#ifdef USEMPI
	using namespace core;
	using namespace core::scoring;

	TR << "Slave Node " << rank_ << ": Requesting to send unfolded energy data to master" << std::endl;

	int empty_data( 0 );
	MPI_Status status;
	double data_vector[ n_score_types ];
	char tlc_vector[ LENGTH_TLC ];

	// send message that we want to send a vector of energies
	MPI_Send( &current_job_id_, 1, MPI_INT, 0, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD );

	// get response from master
	MPI_Recv( &empty_data, 1, MPI_INT, 0, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD, &status );
	TR << "Slave Node " << rank_ << ": Sending unfolded energy data to master" << std::endl;

	// convert string to vector of chars
	for ( Size i( 0 ); i < LENGTH_TLC; ++i ) {
		tlc_vector[ i ] = tlc[ i ];
	}

	// send vector to master
	MPI_Send( &tlc_vector, LENGTH_TLC, MPI_CHAR, 0, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD );

	// convert EMapVector to vector of doubles
	for ( Size i( 1 ); i <= n_score_types; ++i ) {
		data_vector[ i - 1 ] = scores[ ScoreType( i ) ];
	}

	// send vector to master
	MPI_Send( &data_vector, n_score_types, MPI_DOUBLE, 0, UNFOLDED_ENERGY_DATA_TAG, MPI_COMM_WORLD );

	TR << "Slave Node " << rank_ << ": Sent unfolded energy data to master" << std::endl;
#endif
}
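// Both the slave sender above and the master handlers in master_go() flatten an EMapVector
// into a plain double array for MPI transport and rebuild it on the receiving side. A
// minimal sketch of that round trip, using only types already included here (the helper
// functions are hypothetical and the block is not compiled):
#if 0
inline void emap_to_buffer( core::scoring::EMapVector const & emap, double * buffer )
{
	// ScoreType enums start at 1, so ScoreType( i ) fills buffer slot i - 1
	for ( core::Size i( 1 ); i <= core::scoring::n_score_types; ++i ) {
		buffer[ i - 1 ] = emap[ core::scoring::ScoreType( i ) ];
	}
}

inline void buffer_to_emap( double const * buffer, core::scoring::EMapVector & emap )
{
	for ( core::Size i( 1 ); i <= core::scoring::n_score_types; ++i ) {
		emap[ core::scoring::ScoreType( i ) ] = buffer[ i - 1 ];
	}
}
#endif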

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::set_energy_terms( core::scoring::EMapVector const & weights )
{
	using namespace protocols::jd2;

	if ( rank_ == 0 ) {
		master_set_energy_terms( weights );
	} else {
		slave_set_energy_terms( weights );
	}
}

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::master_set_energy_terms( core::scoring::EMapVector const & weights )
{
	using namespace core;
	using namespace core::scoring;

	// get weights
	energy_terms_ = weights;

	// for each energy term in the EMapVector
	for ( Size i( 1 ); i <= n_score_types; ++i ) {

		// if the energy term has a non-zero weight, set it to plus or minus one
		if ( energy_terms_[ ScoreType( i ) ] > 0 ) {
			energy_terms_[ ScoreType( i ) ] = 1;
		} else if ( energy_terms_[ ScoreType( i ) ] < 0 ) {
			energy_terms_[ ScoreType( i ) ] = -1;
		}
	}
}
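// A worked example of the collapse above (weight values are illustrative, not from any
// particular score function): an incoming weight set of { fa_atr = 0.8, fa_rep = 0.44,
// fa_sol = 0.65, all other terms 0 } is stored in energy_terms_ as { fa_atr = 1, fa_rep = 1,
// fa_sol = 1, all other terms 0 }, so energy_terms_ records only which terms are in use
// (and their sign) while the per-residue energies themselves stay unweighted.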

void
UnfoldedStateEnergyCalculatorMPIWorkPoolJobDistributor::slave_set_energy_terms( core::scoring::EMapVector const & weights )
{
#ifdef USEMPI
	using namespace core;
	using namespace core::scoring;

	TR << "Slave Node " << rank_ << ": Requesting to send unfolded energy terms to master" << std::endl;

	int empty_data( 0 );
	MPI_Status status;
	double data_vector[ n_score_types ];

	// send message that we want to send a vector of energies
	MPI_Send( &current_job_id_, 1, MPI_INT, 0, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD );

	// get response from master
	MPI_Recv( &empty_data, 1, MPI_INT, 0, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD, &status );
	TR << "Slave Node " << rank_ << ": Sending unfolded energy terms to master" << std::endl;

	// convert EMapVector to vector of doubles
	for ( Size i( 1 ); i <= n_score_types; ++i ) {
		data_vector[ i - 1 ] = weights[ ScoreType( i ) ];
	}

	// send vector to master
	MPI_Send( &data_vector, n_score_types, MPI_DOUBLE, 0, UNFOLDED_ENERGY_TERMS_TAG, MPI_COMM_WORLD );

	TR << "Slave Node " << rank_ << ": Sent unfolded energy terms to master" << std::endl;
#endif
}


} // namespace unfolded_state_energy_calculator
} // namespace protocols