Rosetta 3.5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
RigidChunkClaimer.cc
Go to the documentation of this file.
1 // -*- mode:c++;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
2 // vi: set ts=2 noet:
3 //
4 // (c) Copyright Rosetta Commons Member Institutions.
5 // (c) This file is part of the Rosetta software suite and is made available under license.
6 // (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
7 // (c) For more information, see http://www.rosettacommons.org. Questions about this can be
8 // (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
9 
10 /// @file RigidChunkClaimer.cc
11 /// @brief a TopologyBroker claimer that keeps user-defined rigid chunks of the pose fixed
12 /// @detailed responsibilities: claim exclusive backbone dofs for the rigid regions and copy their coordinates from an input pose
13 /// @author Oliver Lange
14 
15 // Unit Headers
17 
18 // Package Headers
20 
21 // Project Headers
23 #include <core/pose/Pose.hh>
24 #include <core/pose/util.hh>
25 #include <core/id/DOF_ID.hh>
28 // AUTO-REMOVED #include <core/kinematics/Exceptions.hh>
29 #include <protocols/loops/Loop.hh>
35 // AUTO-REMOVED #include <core/io/pdb/pose_io.hh>
38 #include <basic/options/option.hh>
40 
41 // Utility headers
42 #include <utility/excn/Exceptions.hh>
43 #include <basic/Tracer.hh>
44 #include <numeric/random/random.hh>
45 #include <numeric/numeric.functions.hh>
46 #include <numeric/xyz.functions.hh>
47 #include <basic/options/keys/loops.OptionKeys.gen.hh>
48 
49 #include <vector>
50 // AUTO-REMOVED #include <iterator>
51 
52 #include <protocols/jd2/Job.hh>
53 #include <utility/vector1.hh>
54 
55 //Auto Headers
59 
60 
61 
// file-scope tracer for this translation unit (channel "protocols.topo_broker")
62 static basic::Tracer tr("protocols.topo_broker", basic::t_info);
// file-scope random generator; 188428234 is presumably this file's unique RNG id -- TODO confirm
63 static numeric::random::RandomGenerator RG(188428234);
64 
65 namespace protocols {
66 namespace topology_broker {
67 
68 using namespace core;
69 
70 //helper function
72  using core::Size;
73  using namespace basic::options;
74 
75  loops::LoopsOP loops = comparative_modeling::loops_from_alignment( query_pose.total_residue(), align, min_loop_size );
76 
77  // this is now done in select_parts()
78  // randomly grow loops by N residues (4 is a good amount)
79  return loops->invert( query_pose.total_residue() );
80 }
81 
82 // having AdjacentJumps causes problems in fix_internal_coords_of_siblings.
83 // can have more than two child on C(=0) for instance
85  : bExclusive_( true ),
86  bAllowAdjacentJumps_( false ),
87  bUseInputPose_( true ),
88  bRigidInRelax_( false ) {}
89 
91  input_pose_( input_pose ),
92  centroid_input_pose_( input_pose ),
93  rigid_core_( rigid ),
94  bExclusive_( true ),
95  bAllowAdjacentJumps_( false ),
96  bUseInputPose_( true ),
97  bRigidInRelax_( false )
98 {
101 }
102 
105  bUseInputPose_ = true;
106  bUseThreadingJobLoops_ = false;
107  min_loop_size_ = 0;
108  using namespace basic::options;
109  random_grow_loops_by_ = option[ OptionKeys::loops::random_grow_loops_by ]();
110 }
111 
113  if ( typeid( cm ) == typeid( CM_SuggestFixResidue ) ) {
114  CM_SuggestFixResidue& msg = dynamic_cast< CM_SuggestFixResidue& >( cm );
115 
116  //find good residue
117  if ( !current_rigid_core_.size() ) return;
118  msg.good_fix_pos_ = current_rigid_core_.begin()->start() + numeric::nint( 0.5 * current_rigid_core_.begin()->size() ) - 1;
119  msg.received_by_me( this );
120  }
121 }
122 
124  std::string tag,
125  std::istream& is
126 )
127 {
130 
131  if ( tag == "pdb" || tag == "PDB" || tag == "pdb:" || tag == "PDB_FILE" ) {
132  std::string file;
133  is >> file;
136  file );
137  runtime_assert( input_pose_.is_fullatom() );
138  } else if ( tag == "REGION" ) {
139  loops::SerializedLoopList loops = reader.read_pose_numbered_loops_file( is, type(), false /*no strict checking */ );
140  rigid_core_ = loops::Loops( loops );
141  } else if ( tag == "region_file" || tag == "REGION_FILE" ) {
142  std::string file;
143  is >> file;
144  std::ifstream infile( file.c_str() );
145 
146  if (!infile.good()) {
147  utility_exit_with_message( "[ERROR] Error opening RBSeg file '" + file + "'" );
148  }
149  loops::SerializedLoopList loops = reader.read_pose_numbered_loops_file( infile, file, false /*no strict checking */ );
150  rigid_core_ = loops::Loops( loops ); // <==
151  } else if ( tag == "loop_file" || tag == "LOOP_FILE" ) {
152  std::string file;
153  is >> file;
154  std::ifstream infile( file.c_str() );
155 
156  if (!infile.good()) {
157  utility_exit_with_message( "[ERROR] Error opening RBSeg file '" + file + "'" );
158  }
159  loops::SerializedLoopList loops = reader.read_pose_numbered_loops_file( infile, file, false /*no strict checking */ );
160  protocols::loops::Loops loop_defs = loops::Loops( loops ); // <==
161  loop_defs = loop_defs.invert( input_pose_.total_residue() );
162  tr << "Rigid core: " << input_pose_.total_residue() << std::endl << loop_defs << std::endl;
163  rigid_core_ = loop_defs;
164  } else if ( tag == "NO_USE_INPUT_POSE" ) {
165  bUseInputPose_ = false;
166  } else if ( tag == "USE_THREADING_LOOPS" ) {
167  bUseThreadingJobLoops_ = true;
168  } else if ( tag == "MIN_LOOP_SIZE" ) {
169  is >> min_loop_size_;
170  } else if ( tag == "KEEP_FLEXIBLE" ) {
171  runtime_assert( false );
172  //right now not supported
173  // have problems with JumpClaims being reissued ...
174  // in this line: current_jumps_.push_back( foreign_claim.clone() ); //o
175  // jumps get added to our own list ... in finalize_claims they will be send back to the Broker, who will call allow_claim again for this jump
176  bExclusive_ = false;
177  } else if ( tag == "RANDOM_GROW_LOOP_BY" ) {
178  is >> random_grow_loops_by_;
179  } else if ( tag == "RIGID_IN_RELAX" ) {
180  bRigidInRelax_ = true;
181  }
182  else return Parent::read_tag( tag, is );
183  return true;
184 }
185 
187  tr.Debug << type() << " initialized with input_pdb: " << input_pose_.sequence() << " and regions " << rigid_core_ << std::endl;
190 }
191 
194  int attempts = 0;
195  if ( rigid_core_.size() == 0 ) return;//nothing to select
196  while ( current_rigid_core_.size() == 0 && attempts < 50 ) {
197  ++attempts;
199  it != eit; ++it ) {
200  if ( RG.uniform() >= it->skip_rate() ) {
202  }
203  }
204  }
205  if ( current_rigid_core_.size() == 0 ) {
207  }
208  if ( random_grow_loops_by_ > 0 ) {
209  core::Size nres( current_rigid_core_[ current_rigid_core_.size() ].stop() + 200 ); //it doesn't matter for this where exactly nres is.
210  loops::Loops loops( current_rigid_core_.invert( nres ) );
211  loops.grow_all_loops( nres, random_grow_loops_by_ );
212  tr.Info << "Enlarged loops: " << std::endl;
213  tr.Info << loops << std::endl;
214  current_rigid_core_ = loops.invert( nres );
215  }
216 }
217 
218 ///@detail generate exclusive backbone claims for each residue of the rigid-chunk
219 /// jumps are not exclusive and are added later in final_claims --- want to reuse other jumps if possible
221  using namespace loops;
222  tr.Trace << "rigid chunk -- generate claim " << std::endl;
223 
224  //stochastically select rigid_core ( if skip-rate is set >0, otherwise all loops are selected )
225  tr.Trace << "region selected: " << current_rigid_core_ << std::endl;
226 
227  // new_claims.push_back( new CutBiasClaim( *this ) ); we don't need this claim type --- always call manipulate_cut_bias
228  for ( Loops::const_iterator loop_it = current_rigid_core_.begin(); loop_it != current_rigid_core_.end(); ++loop_it ) {
229  for ( Size pos = loop_it->start(); pos <= loop_it->stop(); ++pos ) {
230  new_claims.push_back( new BBClaim( this, pos, DofClaim::EXCLUSIVE ) );
231  }
232  }
233 }
234 
236  select_parts(); //set members current_XXX
238  current_jumps_.clear();
239 }
240 
242  //should we read input structures dofs?
243  tr.Debug << "New decoy:" << std::endl;
244  if ( bUseInputPose_ ) {
245  input_pose_ = pose;
248 
249  // use loops from ThreadingJob ???
250  if ( bUseThreadingJobLoops_ ) {
251  using namespace protocols::jd2;
252  protocols::comparative_modeling::ThreadingJobCOP job = dynamic_cast< protocols::comparative_modeling::ThreadingJob const* >( JobDistributor::get_instance()->current_job()->inner_job().get() );
253  if ( job ) {
254  tr.Debug << "------------------found ThreadingJob ... get loops " << std::endl;
256  }
257  } //bUseThreading
258  tr.Debug << "RigidChunk defined for " << rigid_core_ << std::endl;
259  }
260  new_decoy();
261 }
262 
263 bool RigidChunkClaimer::allow_claim( DofClaim const& foreign_claim ) {
264  if ( foreign_claim.owner() == this ) return true; // always allow your own claims!
265 
266  // check foreign claim
267  if ( foreign_claim.type() == DofClaim::BB && current_rigid_core_.is_loop_residue( foreign_claim.pos( 1 ) ) ) {
268  if ( bExclusive_ ) { // if we want exclusive claim this is not acceptable
269  return false;
270  } else {
271  // allow only the weakest claim. We want to initialize ourselves... don't know if we need to be so restrictive!
272  if ( !(foreign_claim.right() == DofClaim::CAN_INIT) ) return false;
273  }
274  } // DofClaim::BB
275 
276  if ( foreign_claim.type() == DofClaim::JUMP ) {
277  runtime_assert( current_jump_calculator_ );
278  if ( current_jump_calculator_->irrelevant_jump( foreign_claim.pos( 1 ), foreign_claim.pos( 2 ) ) ) return true;
279  if ( !current_jump_calculator_->good_jump( foreign_claim.pos( 1 ), foreign_claim.pos( 2 ) ) ) {
280  return false;
281  } else if ( bExclusive_ ) { // but probably a good jump --- since it has a physical reason.
282  //reclaim the claim
283  current_jumps_.push_back( new JumpClaim( this, foreign_claim.pos( 1 ), foreign_claim.pos( 2 ), DofClaim::EXCLUSIVE ) );
284  return false;
285  } else {
286  current_jumps_.push_back( foreign_claim.clone() ); //ok - remember this jump, since it connects rigid1 and rigid2
287  }
288  } // DofClaim::JUMP
289 
290  if ( foreign_claim.type() == DofClaim::CUT) {
291  for ( loops::Loops::const_iterator region = current_rigid_core_.begin(); region != current_rigid_core_.end(); ++region ) {
292  if (foreign_claim.pos( 1 ) >= region->start() && foreign_claim.pos( 1 ) < region->stop() ) // cut claim can be at the chunk end
293  return false; // no cuts within our rigid-core boundaries
294  }
295  }
296  return true;
297 }
298 
300  tr.Warning << "[WARNING] RigidChunkClaimer couldn't get " << was_declined << std::endl;
301  return false; // no tolerance here --- don't accept any of these
302 }
303 
305  DofClaims extra_jumps;
306  current_jump_calculator_->generate_rigidity_jumps( this, extra_jumps );
307  std::copy( extra_jumps.begin(), extra_jumps.end(), std::back_inserter( current_jumps_ ) );
308  std::copy( current_jumps_.begin(), current_jumps_.end(), std::back_inserter( new_claims ) );
309 }
310 
311 void fix_internal_coords_of_siblings( pose::Pose& pose, pose::Pose const& ref_pose, id::AtomID atom, id::AtomID ref_atom ) {
312  runtime_assert( atom.rsd() >= 1 && atom.rsd() <= pose.total_residue() );
313  runtime_assert( pose.conformation().atom_tree().has( atom ) );
314  runtime_assert( ref_pose.conformation().atom_tree().has( ref_atom ) );
315 
316  bool has_par1( pose.conformation().atom_tree().atom( atom ).parent() );
317  bool ref_has_par1( ref_pose.conformation().atom_tree().atom( ref_atom ).parent() );
318 
319  //folding direction matters for the angle we have to set...hence find the parent atoms and get the angle
320  core::id::AtomID par1O;
321  core::id::AtomID ref_par1O;
322  if ( has_par1 && ref_has_par1 ) {
323  par1O=pose.conformation().atom_tree().atom( atom ).parent()->id();
324  std::string const & aname(pose.residue(par1O.rsd()).atom_name(par1O.atomno()));
325  ref_par1O=core::id::AtomID( ref_pose.residue( par1O.rsd() ).atom_index( aname ), par1O.rsd() );
326  } else {
327  tr.Warning << "cannot fix internal coords of " << atom << " in RigidChunk because 1st parent is missing " << std::endl;
328  return;
329  }
330  bool has_par2( pose.conformation().atom_tree().atom( par1O ).parent() );
331  bool ref_has_par2( ref_pose.conformation().atom_tree().atom( ref_par1O ).parent() );
332  core::id::AtomID par2O;
333  core::id::AtomID ref_par2O;
334  if ( has_par2 && ref_has_par2 ) {
335  par2O=pose.conformation().atom_tree().atom( par1O ).parent()->id();
336  std::string const & aname(pose.residue(par2O.rsd()).atom_name(par2O.atomno()));
337  ref_par2O=core::id::AtomID( ref_pose.residue( par2O.rsd() ).atom_index( aname ), par2O.rsd() );
338  } else {
339  tr.Warning << "cannot fix internal coords of " << atom << " in RigidChunk because 2nd parent is missing " << std::endl;
340  return;
341  }
342  runtime_assert( ref_pose.conformation().atom_tree().has( ref_par1O ) );
343  runtime_assert( ref_pose.conformation().atom_tree().has( ref_par2O ) );
344  runtime_assert( pose.conformation().atom_tree().has( par1O ) );
345  runtime_assert( pose.conformation().atom_tree().has( par2O ) );
346 
347  core::Real angle( numeric::angle_radians( ref_pose.xyz( ref_atom ), ref_pose.xyz( ref_par1O ), ref_pose.xyz( ref_par2O ) ) );
348  tr.Trace << "ref angle direct: " << angle << std::endl;
349  pose.conformation().set_bond_angle( par2O, par1O, atom, angle );
350 
351  id::DOF_ID torsion_offset_dof( atom, id::PHI );
352  id::DOF_ID ref_torsion_offset_dof( ref_atom, id::PHI );
353  core::Real value( ref_pose.conformation().atom_tree().dof( ref_torsion_offset_dof ) );
354  pose.conformation().set_dof( torsion_offset_dof, value );
355 }
356 
357 void fix_mainchain_connect( pose::Pose& pose, pose::Pose const& ref_pose, core::Size upper_residue ) {
358  core::conformation::Residue const & prev_rsd( ref_pose.residue( upper_residue-1 ) );
359  core::conformation::Residue const & rsd( ref_pose.residue( upper_residue ) );
360  core::Size const nbb_prev( prev_rsd.n_mainchain_atoms() );
361  core::id::AtomID bbM1 ( prev_rsd.mainchain_atom( nbb_prev-2 ), upper_residue-1 );
362  core::id::AtomID bb0 ( prev_rsd.mainchain_atom( nbb_prev-1 ), upper_residue-1 );
363  core::id::AtomID bb1 ( prev_rsd.mainchain_atom( nbb_prev ), upper_residue-1 );
364  core::id::AtomID bb2 ( rsd.mainchain_atom( 1 ), upper_residue );
365  core::id::AtomID bb3 ( rsd.mainchain_atom( 2 ), upper_residue );
366  core::id::AtomID bb4 ( rsd.mainchain_atom( 3 ), upper_residue );
367 
368  core::conformation::Residue const & ref_resi = ref_pose.residue( upper_residue );
369  tr.Trace << "mainchain torsion: ref: " << ref_resi.mainchain_torsion( 1 ) << " atom-tree: "
370  << ref_pose.conformation().torsion_angle( bb1, bb2, bb3, bb4 ) << std::endl;
371 
372  core::conformation::Residue const & resi = pose.residue( upper_residue );
373  tr.Trace << "mainchain torsion (before): conf: " << resi.mainchain_torsion( 1 ) << " atom-tree: "
374  << pose.conformation().torsion_angle( bb1, bb2, bb3, bb4 ) << std::endl;
375 
376  pose.conformation().set_bond_length( bb1, bb2, ref_pose.conformation().bond_length( bb1, bb2 ) );
377  pose.conformation().set_bond_angle ( bb0, bb1, bb2, ref_pose.conformation().bond_angle( bb0, bb1, bb2 ) );
378  pose.conformation().set_bond_angle ( bb1, bb2, bb3, ref_pose.conformation().bond_angle( bb1, bb2, bb3 ) );
379  pose.conformation().set_torsion_angle( bbM1, bb0, bb1, bb2, ref_pose.conformation().torsion_angle( bbM1, bb0, bb1, bb2 ) );
380  pose.conformation().set_torsion_angle( bb0, bb1, bb2, bb3, ref_pose.conformation().torsion_angle( bb0, bb1, bb2, bb3 ) );
381  pose.conformation().set_torsion_angle( bb1, bb2, bb3, bb4, ref_pose.conformation().torsion_angle( bb1, bb2, bb3, bb4 ) );
382 
383  core::conformation::Residue const & new_resi = pose.residue( upper_residue ); //this should trigger update of coords and torsions
384  tr.Trace << "mainchain torsion (after): conf: " << new_resi.mainchain_torsion( 1 ) << " atom-tree: "
385  << pose.conformation().torsion_angle( bb1, bb2, bb3, bb4 ) << std::endl;
386 
387  if ( prev_rsd.has( "O" ) ) {
388  core::id::AtomID ref_atomO( prev_rsd.atom_index( "O" ), upper_residue-1 );
389  core::id::AtomID atomO( pose.residue_type( upper_residue-1 ).atom_index( "O" ), upper_residue-1 );
390  fix_internal_coords_of_siblings( pose, ref_pose, atomO, ref_atomO );
391  }
392  if ( rsd.has( "H" ) ) {
393  core::id::AtomID ref_atomH( rsd.atom_index( "H" ), upper_residue );
394  core::id::AtomID atomH( new_resi.atom_index( "H" ), upper_residue );
395  runtime_assert( new_resi.has( "H" ) );
396  fix_internal_coords_of_siblings( pose, ref_pose, atomH, ref_atomH );
397  }
398 
399  if ( tr.Trace.visible() ) {
400  bool ideal1( core::pose::is_ideal_position( upper_residue, ref_pose ) );
401  if ( ideal1 && !core::pose::is_ideal_position( upper_residue, pose ) ) {
402  tr.Warning << " pose in RigidChunkClaimer is not ideal at position " << upper_residue << " although template pose was ideal there " << std::endl;
403  }
404 
405  bool ideal2( core::pose::is_ideal_position( upper_residue-1, ref_pose ) );
406  if ( ideal2 && !core::pose::is_ideal_position( upper_residue-1, pose ) ) {
407  tr.Warning << " pose in RigidChunkClaimer is not ideal at position " << upper_residue-1 << " although template pose was ideal there " << std::endl;
408  }
409  }
410 }
411 
412 void copy_internal_coords( pose::Pose& pose, pose::Pose const& ref_pose, loops::Loops core ) {
413  ///fpd if there are post modifications to pose (not in ref_pose), we can't just copy ref_pose->pose
414  ///fpd instead ... make xyz copy in rigid regions
415  for ( loops::Loops::const_iterator region = core.begin(); region != core.end(); ++region ) {
416  for (Size i=region->start(); i<=region->stop(); ++i) {
417  core::conformation::Residue const &rsd_i = ref_pose.residue(i);
418  pose.replace_residue ( i , rsd_i , false );
419  }
420  }
421 
422  if ( tr.Trace.visible() ) {
423  tr.Trace << pose.fold_tree() << std::endl;
424  tr.Trace << ref_pose.fold_tree() << std::endl;
425  }
426 
427  ///fpd fix connections
428  ///fpd this requires that the input pose have one flanking residue on each side of each region
429  for ( loops::Loops::const_iterator region = core.begin(); region != core.end(); ++region ) {
430  Size loop_start = region->start();
431  Size loop_stop = region->stop();
432 
433  bool lower_connect = ( loop_start > 1
434  && !pose.residue(loop_start).is_lower_terminus()
435  && !pose.fold_tree().is_cutpoint( loop_start-1 ) );
436  bool upper_connect = ( loop_stop < pose.total_residue()
437  && !pose.residue(loop_stop).is_upper_terminus()
438  && !pose.fold_tree().is_cutpoint( loop_stop ) );
439 
440  if ( lower_connect ) {
441  tr.Trace << "fixing lower connection for " << loop_start << std::endl;
442  fix_mainchain_connect( pose, ref_pose, loop_start );
443  } else {
444  tr.Trace << "NOT fixing lower connection for " << loop_start << std::endl;
445  }
446 
447  if ( upper_connect ) {
448  tr.Trace << "fixing upper connection for " << loop_stop << std::endl;
449  fix_mainchain_connect( pose, ref_pose, loop_stop+1 );
450  } else {
451  tr.Trace << "NOT fixing upper connection for " << loop_stop << std::endl;
452  }
453  }
454 }
455 
457  if ( bRigidInRelax_ ) {
458  rigid_core_.switch_movemap( mm, id::BB, false );
459  }
460 }
461 
/// @brief sanity-check the (centroid) input pose before its dofs are used for the chunk:
/// the last rigid region must fit inside centroid_input_pose_, and the rigid-core backbone
/// must not contain missing density (backbone atoms more than 20A apart).
/// @throws utility::excn::EXCN_BadInput when missing backbone density is detected.
/// NOTE(review): this rendering elides several source lines (487, 493, 497-499, 510) --
/// the loop headers over rigid_core_ and the n_pos/ca_pos/o_pos coordinate fetches are
/// missing here, so the body below is incomplete as shown.
462 void RigidChunkClaimer::initialize_dofs( core::pose::Pose& pose, DofClaims const& /* init_claims*/, DofClaims& /*failed_init_claims*/ ) {
463  //need to copy coords and jumps --- if chunks were idealized no problem .... but non-idealized stuff ?
464  //also take care of fullatom vs centroid...
465  // tr.Warning << "[WARNING] *** use input structure of RigidChunkClaimer --- NEEDS TO BE IDEALIZED !!! *** \n";
466  // to do this without idealized :
467 
468  // get rigid-body reorientation for first residue... this must be applied to all residues in the chunk,
469  // and then just set from the coordinates.
470 
471  //in a sense this is not necessary since we asked for exclusive rights for the stuff in RIGID ...
472  /* for ( DofClaims::const_iterator claim=init_claims.begin(); claim!=init_claims.end(); ++claim ) {
473  if ( (*claim)->owner() != this ) continue;
474 
475  }*/
476  // superimpose_chain (pose, input_pose_, rigid_core_ );
477 
478 
479  //need to have same number of residues for fold-tree transfer...
480  // would be nice to drop this restriction but for now, fill up with missing density...
481 
482  //fpd runtime_assert( pose.total_residue() == centroid_input_pose_.total_residue() );
483  //fpd really, we just have to make sure that #residues in the input pose > index of last rigid chunk
484  //fpd (strictly greater-than since we have to have a flanking res on each side of each region)
485  //fpd we still need missing dens in the gaps (but not at c term now!)
486  core::Size lastChunk=1;
488  lastChunk = std::max( lastChunk , it->stop() );
489  runtime_assert ( lastChunk <= centroid_input_pose_.total_residue() );
490 
491  bool missing_density( false );
492  //sanity check: no missing density in backbone in any of the rigid_core residues?
494  for ( Size pos = it->start(); pos <=it->stop(); ++pos ) {
495  // Do we really have Sidechains ?
496  // check this by making sure that no SC atom is more than 20A (?) away from CA
500  if ( ( n_pos - ca_pos).length() > 20 || ( ( n_pos - o_pos ).length() > 20 ) ) {
501  tr.Error << "missing backbone in rigid-chunk at " << pos << std::endl;
502  missing_density = true;
503  }
504  }
505  }
506  if ( missing_density ) throw utility::excn::EXCN_BadInput( " missing density in backbone of rigid-chunk... check your LOOP definition");
507  //centroid_input_pose_.fold_tree( pose.fold_tree() );
508  runtime_assert( !pose.is_fullatom() );
509  runtime_assert( !centroid_input_pose_.is_fullatom() );
511 }
512 
513 ///@brief multiply your bias to this -- if its zero don't change that, i.e., multiply only
516 }
517 
518 
520  loops::Loops const& region( current_rigid_core_ );
521 
522  // copy sidechain torsions from input pose
523  tr.Debug << "copy side chains for residues with * / missing density residues with - ";
524  for ( loops::Loops::const_iterator it = region.begin(); it!=region.end(); ++it ) {
525  for ( Size pos = it->start(); pos <=it->stop(); ++pos ) {
526  bNeedToRepack[ pos ] = false; //in principle our residues don't need a repack since we have a side-chains for them.
527  // Do we really have Sidechains ?
528  // check this my making sure that no SC atom is more than 20A (?) away from CA
530  for ( Size j = 1; j<=input_pose_.residue( pos ).natoms(); ++j ) {
531  if ( ( ca_pos - input_pose_.residue( pos ).atom( j ).xyz()).length() > 20 ) {
532  tr.Debug << "-" << pos << " ";
533  bNeedToRepack[ pos ] = true;
534  break; //one bad atom is enough
535  }
536  }
537  //copy sidechains only for non-loop regions
538  if ( !bNeedToRepack[ pos ] ) {
539  tr.Debug << "*" << pos << " ";
540  bool const lower_cut ( pose.residue( pos ).has_variant_type( chemical::CUTPOINT_LOWER ) );
541  bool const upper_cut ( pose.residue( pos ).has_variant_type( chemical::CUTPOINT_UPPER ) );
542  //bool const disulf ( pose.residue( pos ).has_variant_type( chemical::DISULFIDE ) );
543  pose.replace_residue( pos, input_pose_.residue( pos ), true /*orient backbone*/ );
546  }
547  } //for all rigid residues
548  }
549  tr.Debug << " that have not moved from start-structure" << std::endl;
550 } //switch to fullatom
551 
552 
553 // ================================================================================
554 // ======================== JumpCalculator ========================================
555 // ================================================================================
556 RigidChunkClaimer::JumpCalculator::JumpCalculator( loops::Loops const& rigid, bool bAllowAdjacentJumps )
557  : rigid_ ( rigid ),
558  visited_( rigid.size(), 0 ),
559  new_nr_( 1 ),
560  bAllowAdjacentJumps_( bAllowAdjacentJumps ) {}
561 
562 bool is_not_neighbor_to_rigid( loops::Loops const& rigid, Size pos1, Size pos2 ) {
563  Size up1 = rigid.loop_index_of_residue( pos1-1 );
564  Size in1 = rigid.loop_index_of_residue( pos1 );
565  Size down1 = rigid.loop_index_of_residue( pos1+1 );
566 
567  Size down2 = rigid.loop_index_of_residue( pos2+1 );
568  Size in2 = rigid.loop_index_of_residue( pos2 );
569  Size up2 = rigid.loop_index_of_residue( pos2-1 );
570  if ( !in1 && ( up1 && down1 ) ) return false;
571  if ( !in2 && ( up2 && down2 ) ) return false;
572  return true;
573 }
574 
575 bool connects_rigid_regions( loops::Loops const& rigid, Size pos1, Size pos2 ) {
576  Size rigid1 = rigid.loop_index_of_residue( pos1 );
577  Size rigid2 = rigid.loop_index_of_residue( pos2 );
578  return rigid1 && rigid2;
579 }
580 
582  tr.Trace << "RigidChunk::irrelavant jumps " << pos1 << " " << pos2 << std::endl;
583  tr.Trace << "connects_rigid: " << connects_rigid_regions( rigid_, pos1, pos2 ) << std::endl;
584  tr.Trace << "is not_neighbor_to_rigid: " << is_not_neighbor_to_rigid( rigid_, pos1, pos2 ) << std::endl;
585  tr.Trace << "bAllowAdjacent: " << bAllowAdjacentJumps_ << std::endl;
586 
587  if ( !connects_rigid_regions( rigid_, pos1, pos2 ) ) {
588  return bAllowAdjacentJumps_ || is_not_neighbor_to_rigid( rigid_, pos1, pos2 );//jump doesn't connect two rigid regions -- irrelevant
589  }
590  return false; //either connects rigid regions or is neighbor to rigid region
591 }
592 
593 ///@brief check if this (relevant) jump is compatible with our rigid-structure
594 /// not on the same continuous stretch of rigid residues ( we don't allow cuts within rigid stretches )
595 /// not connecting already conntected rigid stretches
596 /// if it connects two unconnected rigid stretches ---> its a good jump we'll keep it,
597 /// *** --> update visited_ ***
599  Size up_loop( rigid_.loop_index_of_residue( up ) );
600  Size down_loop( rigid_.loop_index_of_residue( down ) );
601 
602  //we arrive here only if jump is not irrelevant...
603  //if we don't allow adjacent jump that means this jump connects rigid regions or is a bad neighbor
604  if ( !bAllowAdjacentJumps_ && !connects_rigid_regions( rigid_, up, down ) ) return false;
605 
606  runtime_assert( up_loop && down_loop ); //since this would be irrelevant --- already checked.
607 
608  //don't allow jumps within same loop -- that will make a nasty cut within rigid region
609  if ( up_loop == down_loop ) return false;
610 
611  // at this point rigid1 and rigid2 refer to rigid regions but not the same --- this might be useful if we need to connect rigid regions
612 
613  runtime_assert( visited_.size() >= up_loop );
614  runtime_assert( visited_.size() >= down_loop );
615  // jump touches unvisited regions or visited but yet disconnected regions
616  if ( !visited_[ up_loop ] || !visited_[ down_loop ] || ( visited_[ up_loop ] != visited_[ down_loop ] ) ) {
617 
618  // decide upon 3 cases: both nodes unvisited, 1 node visited, both nodes visited
619  // case0 : both new tag with new jump_nr
620  Size visit_nr = new_nr_++;
621  // case1 : both visited--> replace all higher visit_nr by lower visit_nr
622  if ( visited_[ up_loop ] && visited_[ down_loop ] ) {
623  Size old_visit_nr = visited_[ down_loop ]; //arbitrary choice
624  visit_nr = visited_[ up_loop ];
625  for ( Size i=1; i<=visited_.size(); i++ ) {
626  if ( visited_[ i ] == old_visit_nr ) visited_[ i ] = visit_nr;
627  }
628  } else if ( visited_[ up_loop ] || visited_[ down_loop ]) {
629  // case2: one already visited the other is still zero and thus neutral to addition
630  visit_nr = visited_[ up_loop ] + visited_[ down_loop ];
631  } // case3: none visited
632  visited_[ up_loop ] = visit_nr;
633  visited_[ down_loop ] = visit_nr;
634  return true;
635  } // jump between different regions
636  return false;
637 }
638 
639 ///@detail generate a list of Jumps (Size tupels) that fix the remaining part of the chunk
640 void
642  if( visited_.size() == 0 ){ // No rigid chunks ??
643  return;
644  }
645 
646  //now we have a connection pattern based on the jumps already present.
647  //take a visited region and make it the root-reg
648  Size root_reg = 0;
649  for ( Size region = 1; region <= visited_.size(); region++ ) {
650  if ( visited_[ region ] ) {
651  root_reg = region;
652  break;
653  }
654  }
655 
656  // if no rigid regions are yet connected, define one arbitrarily as the root-reg
657  if ( root_reg == 0 ) {
658  root_reg = 1;
659  runtime_assert( visited_.size() > 0 );
660  visited_[ root_reg ] = 1;
661  }
662 
663  loops::Loops::LoopList rigid_loops = rigid_.loops(); // loops in sequence that correspond to the regions
664 
665  // take middle of this loop piece. ... there might be better ways to make the extra jumps...
666  Size const anchor( static_cast< Size >( 0.5*(rigid_loops[ root_reg ].stop()
667  - rigid_loops[ root_reg ].start()) ) + rigid_loops[ root_reg ].start() );
668 
669  for ( Size region = 1; region <= visited_.size(); region++ ) {
670  Size old_visited = visited_[ region ];
671  if ( visited_[ region ] != visited_[ root_reg ] ) {
672  Size target_pos ( rigid_loops[ region ].start()
673  + static_cast< Size >( 0.5*( rigid_loops[ region ].stop()-rigid_loops[ region ].start() ) ) );
674  extra_jumps.push_back( new JumpClaim( THIS, anchor, target_pos, DofClaim::EXCLUSIVE ) );
675  visited_[ region ] = visited_[ root_reg ];
676 
677  if ( old_visited ) { //if we connected a cluster make sure to update all its nodes
678  for ( Size i=1; i<=visited_.size(); i++ ) {
679  if ( visited_[ i ] == old_visited ) visited_[ i ] = visited_[ root_reg ];
680  }
681  }
682  }
683  } // for region
684 } //generate_rigidity_jumps
685 
686 } //topology_broker
687 } //protocols