#include <basic/options/option.hh>
#include <basic/options/keys/els.OptionKeys.gen.hh>
#include <basic/Tracer.hh>
static basic::Tracer TR( "protocols.wum2.MPI_EndPoint" );
MPI_EndPoint::MPI_EndPoint( mpi::communicator world,
  function< uint64_t () > role_available_mem ) :
  EndPoint( role_available_mem ),
  world_( world )
{
  // open non-blocking channels for incoming clear commands and status requests
  clearcommand_channel_.get<0>() = world_.irecv( mpi::any_source, CLEARCOMMAND, clearcommand_channel_.get<1>() );
  statusrequest_channel_.get<0>() = world_.irecv( mpi::any_source, STATUSREQUEST, statusrequest_channel_.get<1>() );
}
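// Overview of the non-blocking status/work-unit exchange implemented below
// (a summary inferred from the code in this file, not separate documentation):
//   1. send_status_request( rank ) isends a StatusRequest advertising how much
//      work-unit memory this node could send, and posts an irecv for the reply.
//   2. On the peer, check_and_act_status_request() answers with a StatusResponse
//      stating how much it can accept and how much it has queued, then hands the
//      response to a caller-supplied functor (e.g. listen_wu_sendrecv).
//   3. Back on the requester, act_on_status_response() runs its functor
//      (e.g. initiate_wu_sendrecv) on each completed reply, which posts the
//      actual WORKUNITVEC sends and receives.
//   4. cleanup_reqs() reaps completed requests and moves received work units onto inq_.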
void MPI_EndPoint::check_and_act_status_request( function< void ( StatusResponse &, int ) > functor ) {
  if ( statusrequest_channel_.get<0>().test().is_initialized() ) {
    // a StatusRequest arrived: reply with how much work-unit memory this node can
    // accept (bounded by what the requester has to send) and how much it has queued
    StatusResponse m;
    mpi::request req;
    m.rank = world_.rank(); // the requester matches this rank against its open exchanges
    uint64_t free_mem = role_available_mem_();
    uint64_t available_work = statusrequest_channel_.get<1>().max_outgoing_wu_mem;
    m.incoming_allocated = free_mem < available_work ? free_mem : available_work;
    m.outq_current_mem = outq_.current_mem();

    tuple< mpi::request, StatusResponse > tmp = make_tuple( req, m );
    outbound_statusresponse_.push_back( tmp );
    std::list< tuple< mpi::request, StatusResponse > >::reverse_iterator itr = outbound_statusresponse_.rbegin();
    itr->get<0>() = world_.isend( statusrequest_channel_.get<1>().rank, STATUSRESPONSE, itr->get<1>() );

    // let the caller react to the response just sent (e.g. post the matching send/recv)
    functor( itr->get<1>(), statusrequest_channel_.get<1>().rank );

    // re-open the channel for the next status request
    statusrequest_channel_.get<0>() = world_.irecv( mpi::any_source, STATUSREQUEST, statusrequest_channel_.get<1>() );
  }
}
void MPI_EndPoint::listen_wu_sendrecv( StatusResponse & r, int requesting_node ) {
  // post the receive and send agreed on in the status exchange with requesting_node
  receive_wus( requesting_node, r.incoming_allocated );
  send_wus( requesting_node, r.outq_current_mem );
}
void MPI_EndPoint::send_status_request( int rank ) {
  // tell the other node how much work-unit memory this node could send it
  StatusRequest m;
  mpi::request req;
  m.rank = world_.rank(); // the responder replies to this rank
  m.max_outgoing_wu_mem = max_outgoing_wu_mem();

  tuple< mpi::request, StatusRequest > tmpa = make_tuple( req, m );
  outbound_statusrequest_.push_back( tmpa );
  std::list< tuple< mpi::request, StatusRequest > >::reverse_iterator jtr = outbound_statusrequest_.rbegin();
  jtr->get<0>() = world_.isend( rank, STATUSREQUEST, jtr->get<1>() );

  // post a matching non-blocking receive for the StatusResponse
  StatusResponse n;
  mpi::request reqn;
  tuple< mpi::request, StatusResponse > tmp = make_tuple( reqn, n );
  inbound_statusresponse_.push_back( tmp );
  std::list< tuple< mpi::request, StatusResponse > >::reverse_iterator itr = inbound_statusresponse_.rbegin();
  itr->get<0>() = world_.irecv( rank, STATUSRESPONSE, itr->get<1>() );

  // remember that a status exchange with this rank is in flight
  open_status_.insert( rank );
}
void MPI_EndPoint::act_on_status_response( function< bool ( StatusResponse & ) > f ) {
  std::list< tuple< mpi::request, StatusResponse > >::iterator itr = inbound_statusresponse_.begin();
  while ( itr != inbound_statusresponse_.end() ) {
    if ( itr->get<0>().test().is_initialized() ) {
      if ( f( itr->get<1>() ) ) {
        // the functor handled this response; close out the exchange with that rank
        open_status_.erase( itr->get<1>().rank );
        itr = inbound_statusresponse_.erase( itr );
      } else {
        ++itr;
      }
    } else {
      ++itr;
    }
  }
}
bool MPI_EndPoint::initiate_wu_sendrecv( StatusResponse & r ) {
  // r describes the other node: how much it has queued for us (outq_current_mem)
  // and how much of our work it is willing to accept (incoming_allocated)
  bool can_send = false;
  bool can_recv = false;

  if ( r.outq_current_mem == 0 ) {
    can_recv = true; // nothing queued on the other side, so nothing to receive
  } else if ( r.outq_current_mem > role_available_mem_() ) {
    // not enough local memory to accept the other node's queue; leave can_recv false
  } else {
    can_recv = true;
    receive_wus( r.rank, r.outq_current_mem );
  }

  if ( r.incoming_allocated == 0 ) {
    can_send = true; // the other node cannot take anything, so there is nothing to send
  } else {
    can_send = true;
    send_wus( r.rank, r.incoming_allocated );
  }

  // true only if both directions were handled (possibly trivially)
  return can_send && can_recv;
}
void MPI_EndPoint::cleanup_reqs() {
  // reap finished outgoing work-unit sends and finished incoming receives
  outbuf_.cleanup_reqs();
  std::vector< WorkUnitSP > tmp = inbuf_.cleanup_reqs();
  // queue the delivered work units, high-priority ones at the front
  for ( std::vector< WorkUnitSP >::iterator itr = tmp.begin(); itr != tmp.end(); itr++ ) {
    if ( (*itr)->prioritize() ) {
      inq_.push_front( *itr );
    } else {
      inq_.push_back( *itr );
    }
  }
  // drop status responses and status requests whose sends have completed
  std::list< tuple< mpi::request, StatusResponse > >::iterator itr = outbound_statusresponse_.begin();
  while ( itr != outbound_statusresponse_.end() ) {
    if ( itr->get<0>().test().is_initialized() ) {
      itr = outbound_statusresponse_.erase( itr );
    } else {
      ++itr;
    }
  }
  std::list< tuple< mpi::request, StatusRequest > >::iterator jtr = outbound_statusrequest_.begin();
  while ( jtr != outbound_statusrequest_.end() ) {
    if ( jtr->get<0>().test().is_initialized() ) {
      jtr = outbound_statusrequest_.erase( jtr );
    } else {
      ++jtr;
    }
  }
}
void MPI_EndPoint::receive_wus( int rank, uint64_t mem_size ) {
  if ( mem_size != 0 ) {
    // reserve an incoming buffer of the agreed size and post a non-blocking receive
    WUQueueBuffer::riterator itr = inbuf_.allocate_buffer( mem_size );
    itr->get<1>() = world_.irecv( rank, WORKUNITVEC, *(itr->get<2>()) );
  }
}
void MPI_EndPoint::send_wus( int rank, uint64_t mem_size ) {
  using namespace basic::options;
  using namespace basic::options::OptionKeys;

  if ( mem_size != 0 ) {
    std::vector< WorkUnitSP > tmp;
    uint64_t current_size = 0;
    int counter = 0; // limits how many work units go out in a single send

    // number of master nodes: one per traj_per_master trajectories, rounded up
    int num_masters =
      ( option[OptionKeys::els::num_traj]() / option[OptionKeys::els::traj_per_master]() ) +
      ( !( option[OptionKeys::els::num_traj]() % option[OptionKeys::els::traj_per_master]() == 0 ) );
    int num_slaves = ( world_.size() - num_masters ) / num_masters;

    // pop work units off the outbound queue until the receiver's memory budget is
    // filled, keeping some in reserve for the other slaves and sending at most two
    while ( outq_.size_front() && current_size + outq_.size_front() <= mem_size ) {
      current_size += outq_.size_front();
      tmp.push_back( outq_.pop_front() );
      counter++;
      if ( outq_.size() < num_slaves || counter >= 2 )
        break;
    }

    // copy the selected work units into a send buffer and post the non-blocking send
    WUQueueBuffer::riterator itr = outbuf_.allocate_buffer( current_size );
    itr->get<2>()->insert( itr->get<2>()->end(), tmp.begin(), tmp.end() );
    itr->get<1>() = world_.isend( rank, WORKUNITVEC, *(itr->get<2>()) );
  }
}
void MPI_EndPoint::check_and_act_clearcommand() {
  if ( clearcommand_channel_.get<0>().test().is_initialized() ) {
    // ... handle the clear command, then re-open the channel for the next one ...
    clearcommand_channel_.get<0>() = world_.irecv( mpi::any_source, CLEARCOMMAND, clearcommand_channel_.get<1>() );
  }
}
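// A minimal sketch of how a role class might drive this endpoint from its main
// loop. The role object, its available_mem() accessor, and the loop condition are
// illustrative assumptions; only the MPI_EndPoint calls come from this file.
//
//   MPI_EndPoint link( world, boost::bind( &SomeRole::available_mem, this ) );
//   while ( keep_running ) {
//     link.check_and_act_clearcommand();
//     link.check_and_act_status_request(
//       boost::bind( &MPI_EndPoint::listen_wu_sendrecv, &link, _1, _2 ) );
//     link.act_on_status_response(
//       boost::bind( &MPI_EndPoint::initiate_wu_sendrecv, &link, _1 ) );
//     link.cleanup_reqs();
//   }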