Rosetta 3.5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
InsertChunkMover.cc
Go to the documentation of this file.
1 // -*- mode:c++;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
2 // vi: set ts=2 noet:
3 //
4 // (c) Copyright Rosetta Commons Member Institutions.
5 // (c) This file is part of the Rosetta software suite and is made available under license.
6 // (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
7 // (c) For more information, see http://www.rosettacommons.org. Questions about this can be
8 // (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
9 
10 /// @file
11 /// @brief Align a random jump to template
12 /// @detailed
13 /// @author Yifan Song
14 
17 
18 #include <core/pose/Pose.hh>
19 #include <core/pose/util.hh>
20 #include <core/pose/util.tmpl.hh>
21 
22 #include <core/id/AtomID.hh>
23 #include <core/id/AtomID_Map.hh>
28 
29 //#include <core/kinematics/FoldTree.hh>
30 
31 // history
34 #include <basic/datacache/BasicDataCache.hh>
35 
36 //
37 #include <core/fragment/Frame.hh>
39 
40 #include <numeric/xyzVector.hh>
41 #include <numeric/xyz.functions.hh>
42 
43 #include <protocols/moves/Mover.hh>
44 
45 #include <basic/options/option.hh>
46 #include <basic/options/keys/OptionKeys.hh>
47 #include <basic/options/keys/cm.OptionKeys.gen.hh>
48 
49 #include <ObjexxFCL/FArray1D.hh>
50 #include <ObjexxFCL/FArray2D.hh>
51 #include <ObjexxFCL/format.hh>
52 #include <numeric/random/random.hh>
53 #include <numeric/model_quality/rms.hh>
54 #include <numeric/model_quality/maxsub.hh>
55 
56 #include <basic/Tracer.hh>
57 
// File-local random generator; the magic number is presumably this file's
// unique RNG key per Rosetta convention — TODO confirm against the
// numeric::random::RandomGenerator docs.
static numeric::random::RandomGenerator RG(1183103);
// Tracer channel for this mover's debug/diagnostic output.
static basic::Tracer TR( "protocols.hybridization.InsertChunkMover" );
60 
61 namespace protocols {
62 namespace hybridization {
63 
64 using namespace core;
65 using namespace id;
66 using namespace ObjexxFCL;
67 
// NOTE(review): the constructor signature line is missing from this listing;
// this is the tail of the member-initializer list of InsertChunkMover's
// constructor — confirm against the full source.
registry_shift_(0), anchor_insert_only_(false), align_to_ss_only_(false), copy_ss_torsion_only_(false), secstruct_('L')
{
	// Register this mover's type name with the moves::Mover framework and
	// start with an empty per-residue alignment-trial counter.
	moves::Mover::type( "InsertChunkMover" );
	align_trial_counter_.clear();
}
74 
76 
/// @brief Store the template pose, its id, and the pose-to-template sequence
/// alignment later used to pick aligned chunks.
/// NOTE(review): the first line of this signature (the template_pose and
/// template_id parameters, and presumably the name set_template) is missing
/// from this listing — confirm against the full source.
 std::map <core::Size, core::Size> const & sequence_alignment) {
	template_pose_ = template_pose;
	template_id_ = template_id;
	sequence_alignment_ = sequence_alignment;
}
83 
84 void InsertChunkMover::set_aligned_chunk(core::pose::Pose const & pose, Size const jump_number, bool anchor_insert_only_in) {
85  jump_number_ = jump_number;
86  anchor_insert_only_ = anchor_insert_only_in;
87 
88  std::list < Size > downstream_residues = downstream_residues_from_jump(pose, jump_number_);
89  seqpos_start_ = downstream_residues.front();
90  seqpos_stop_ = downstream_residues.back();
91 
92  // make sure it is continuous, may not be necessary if the function gets expanded to handle more than 1 chunk
93  assert(downstream_residues.size() == (seqpos_stop_ - seqpos_start_ + 1));
94 }
95 
/// @brief Attempt (up to MAX_TRIAL times) to pick a seed residue in the chunk,
/// grow a locally continuous pose-to-template alignment around it, and build a
/// CA<->CA AtomID map for superposition.  Returns true once at least 3 CA
/// pairs are mapped, false if every trial fails.
/// NOTE(review): the first line of this signature (the pose parameter and the
/// function name) is missing from this listing — confirm against the full
/// source.
	int registry_shift,
	Size MAX_TRIAL) {
	core::Size counter = 0;
	TR.Debug << sequence_alignment_ << std::endl;
	while (counter < MAX_TRIAL) {
		++counter;
		// NOTE(review): one or two lines are missing from this listing here —
		// possibly per-trial clearing of sequence_alignment_local_ / atom_map_;
		// confirm against the full source.

		//fpd pick a random downstream residue and steal it's position from a template
		//fpd if anchor_insert_only_ is set, use the jump anchor position
		core::Size seqpos_pose = RG.random_range(seqpos_start_, seqpos_stop_);
		if (anchor_insert_only_) {
			seqpos_pose = pose.fold_tree().downstream_jump_residue( jump_number_ );
		}

		TR.Debug << "Align Seqpos: " << seqpos_pose << std::endl;
		// registry_shift slides the pose numbering relative to the alignment keys;
		// an unaligned seed residue just wastes this trial.
		if (sequence_alignment_.find(seqpos_pose+registry_shift) == sequence_alignment_.end()) continue;
		core::Size seqpos_template = sequence_alignment_.find(seqpos_pose+registry_shift)->second;
		TR.Debug << "Found Seqpos: " << seqpos_pose+registry_shift << " -> " << seqpos_template << std::endl;

		// Optionally refuse to seed on a template loop ('L') residue.
		if (align_to_ss_only_ && template_pose_->secstruct(seqpos_template) == 'L') continue;

		TR.Debug << "Passed SS" << std::endl;

		// Remember the seed's secondary-structure type when it is not loop.
		if (template_pose_->secstruct(seqpos_template) != 'L') {
			secstruct_ = template_pose_->secstruct(seqpos_template);
		}

		// collect local alignment for stealing torsion
		// Extend the aligned window N-terminally from the seed until the
		// alignment breaks or the template chain is discontinuous.
		// NOTE(review): ires_pose is unsigned; this loop relies on
		// seqpos_start_ >= 1 to terminate — confirm.
		seqpos_aligned_start_ = seqpos_pose;
		seqpos_aligned_stop_ = seqpos_pose;
		for (Size ires_pose=seqpos_pose; ires_pose>=seqpos_start_; --ires_pose) {
			if (sequence_alignment_.find(ires_pose+registry_shift) == sequence_alignment_.end())
				break;

			core::Size jres_template = sequence_alignment_.find(ires_pose+registry_shift)->second;
			// jres_template is unsigned, so "<= 0" only rejects 0 (unaligned marker).
			if ( jres_template <= 0 || jres_template > template_pose_->total_residue() ) continue;

			if ( !template_pose_->residue_type(jres_template).is_protein() ) continue;
			if ( copy_ss_torsion_only_ && template_pose_->secstruct(jres_template) == 'L') continue;

			sequence_alignment_local_[ires_pose] = jres_template;
			seqpos_aligned_start_ = ires_pose;

			if (discontinued_upper(*template_pose_,jres_template)) {
				TR.Debug << "Disconnect upper: " << ires_pose << " " << jres_template << std::endl;
				break;
			}
		}

		// Same scan in the C-terminal direction from the seed residue.
		for (Size ires_pose=seqpos_pose+1; ires_pose<=seqpos_stop_; ++ires_pose) {
			if (sequence_alignment_.find(ires_pose+registry_shift) == sequence_alignment_.end()) break;
			core::Size jres_template = sequence_alignment_.find(ires_pose+registry_shift)->second;

			if ( jres_template <= 0 || jres_template > template_pose_->total_residue() ) continue;

			if ( !template_pose_->residue_type(jres_template).is_protein() ) continue;
			if (copy_ss_torsion_only_ && template_pose_->secstruct(jres_template) == 'L') continue;

			sequence_alignment_local_[ires_pose] = jres_template;
			seqpos_aligned_stop_ = ires_pose;

			if (discontinued_lower(*template_pose_,jres_template)) {
				TR.Debug << "Disconnect lower: " << ires_pose << " " << jres_template << std::endl;
				break;
			}
		}

		// collect atom_map for superposition
		// Repeat both scans, this time recording CA<->CA AtomID pairs into
		// atom_map_ and counting how many residue pairs were mapped.
		core::Size atom_map_count = 0;
		for (Size ires_pose=seqpos_pose; ires_pose>=seqpos_start_; --ires_pose) {
			if (sequence_alignment_.find(ires_pose+registry_shift) == sequence_alignment_.end()) break;
			if ( !pose.residue_type(ires_pose).is_protein() ) continue;

			core::Size jres_template = sequence_alignment_.find(ires_pose+registry_shift)->second;
			if ( jres_template <= 0 || jres_template > template_pose_->total_residue() ) continue;

			if ( !template_pose_->residue_type(jres_template).is_protein() ) continue;
			if (copy_ss_torsion_only_ && template_pose_->secstruct(jres_template) == 'L') continue;

			core::id::AtomID const id1( pose.residue_type(ires_pose).atom_index("CA"), ires_pose );
			core::id::AtomID const id2( template_pose_->residue_type(jres_template).atom_index("CA"), jres_template );
			atom_map_[ id1 ] = id2;
			++atom_map_count;

			if (discontinued_upper(*template_pose_,jres_template)) break;
		}
		for (Size ires_pose=seqpos_pose+1; ires_pose<=seqpos_stop_; ++ires_pose) {
			if (sequence_alignment_.find(ires_pose+registry_shift) == sequence_alignment_.end()) break;
			if ( !pose.residue_type(ires_pose).is_protein() ) continue;

			core::Size jres_template = sequence_alignment_.find(ires_pose+registry_shift)->second;
			if ( jres_template <= 0 || jres_template > template_pose_->total_residue() ) continue;

			if ( !template_pose_->residue_type(jres_template).is_protein() ) continue;
			if ( copy_ss_torsion_only_ ) {
				if (template_pose_->secstruct(jres_template) == 'L') continue;
			}

			core::id::AtomID const id1( pose.residue_type(ires_pose).atom_index("CA"), ires_pose );
			core::id::AtomID const id2( template_pose_->residue_type(jres_template).atom_index("CA"), jres_template );
			atom_map_[ id1 ] = id2;
			++atom_map_count;
			if (discontinued_lower(*template_pose_,jres_template)) break;
		}

		// fpd we need at least 3 residues aligned
		if (atom_map_count >=3) {
			TR.Debug << sequence_alignment_local_ << std::endl;
			return true;
		}
	}

	//TR << "Failing to get aligned: " << sequence_alignment_local_ << std::endl;
	// Every one of the MAX_TRIAL attempts failed to map at least 3 CA pairs.
	return false;
}
215 
216 
217 void InsertChunkMover::set_registry_shift(int registry_shift) {
218  registry_shift_ = registry_shift;
219 }
220 
/// @brief Return how many times coordinates have been copied onto residue
/// ires so far; 0 for residues the counter vector has not grown to yet.
/// NOTE(review): the signature line is missing from this listing — confirm
/// the function name and parameter type against the full source.
	// NOTE(review): accepting ires == size() suggests align_trial_counter_ is a
	// 1-based utility::vector1 rather than a std::vector — confirm the member
	// declaration.
	if (ires <= align_trial_counter_.size()) {
		return align_trial_counter_[ires];
	}
	return 0;
}
227 
void
// NOTE(review): lines are missing from this listing here — the rest of the
// signature (presumably InsertChunkMover::apply(core::pose::Pose & pose))
// and the statement that sets success_ are not visible; confirm against the
// full source.
	// apply alignment: bail out without touching the pose if no usable local
	// alignment was found for this trial.
	if (!success_) return;

	// Copy template backbone coordinates onto the pose, then resolve any
	// exact CA overlap with residues outside the chunk.
	set_bb_xyz_aligned(pose);
	check_overlap(pose);
}
237 
/// @brief Copy backbone coordinates for the locally aligned window from the
/// template onto the pose, rebuild the remaining atoms from ideal geometry,
/// and bump the per-residue alignment-trial counter.
/// NOTE(review): several lines are missing from this listing here — the
/// signature and the declarations of ids/positions/sch_ids/sch_positions;
/// confirm against the full source.

	Size jump_residue_pose = pose.fold_tree().downstream_jump_residue(jump_number_);
	TR.Debug << "Jump residue: " << jump_residue_pose << std::endl;

	// copy xyz of the backbone
	for (Size ires_pose=seqpos_aligned_start_; ires_pose<=seqpos_aligned_stop_; ++ires_pose) {
		if (sequence_alignment_local_.find(ires_pose) != sequence_alignment_local_.end()) {
			core::Size jres_template = sequence_alignment_local_.find(ires_pose)->second;
			TR.Debug << "Copy xyz of residue " << ires_pose << std::endl;
			for ( Size iatom=1; iatom <= pose.residue_type(ires_pose).last_backbone_atom(); ++iatom ) { // use residue_type to prevent internal coord update
				std::string atom_name(pose.residue_type(ires_pose).atom_name(iatom));
				if (template_pose_->residue_type(jres_template).has(atom_name)) {
					// Matching backbone atom exists in the template: steal its xyz.
					Size jatom = template_pose_->residue_type(jres_template).atom_index(atom_name);
					ids.push_back(core::id::AtomID(iatom,ires_pose));
					positions.push_back(template_pose_->xyz(core::id::AtomID(jatom,jres_template)));
				}
				else {
					// No counterpart in the template: rebuild this atom ideally below.
					sch_ids.push_back(core::id::AtomID(iatom,ires_pose));
				}
			}

			// All non-backbone (side-chain) atoms are rebuilt from ideal geometry.
			for ( Size iatom=pose.residue_type(ires_pose).last_backbone_atom()+1;
					iatom<= pose.residue_type(ires_pose).natoms(); ++iatom ) { // use residue_type to prevent internal coord update
				sch_ids.push_back(core::id::AtomID(iatom,ires_pose));
			}

			// Grow the counter on demand, then record this copy.
			// NOTE(review): indexing with ires_pose == size() suggests a 1-based
			// utility::vector1 rather than std::vector — confirm the member type.
			while (ires_pose > align_trial_counter_.size()) {
				align_trial_counter_.push_back(0);
			}
			++align_trial_counter_[ires_pose];
		}
	}

	pose.batch_set_xyz(ids,positions);

	// idealize sidechains
	for (Size iatom = 1; iatom <= sch_ids.size(); ++iatom) {
		sch_positions.push_back(
			pose.residue(sch_ids[iatom].rsd()).build_atom_ideal( sch_ids[iatom].atomno(), pose.conformation() )
		);
	}
	pose.batch_set_xyz(sch_ids,sch_positions);

	// idealize the connection between copied and uncopied region
	// NOTE(review): the bodies of the junction-idealization blocks and the
	// statements completing the TemplateHistory update are missing from this
	// listing (only stray closing braces remain) — confirm against the full
	// source.
	}
	}

	TemplateHistory &history =
}
301 
/// @brief If any CA atom of the inserted chunk lands numerically on top of a
/// CA outside the chunk, translate the whole chunk by a small random vector
/// so no two atoms start from identical coordinates.
/// NOTE(review): the signature line (presumably taking core::pose::Pose &
/// pose) is missing from this listing — confirm against the full source.
	bool overlapped = false;
	// Scan every chunk CA against every CA outside the chunk; stop at the
	// first coincident pair.
	for ( Size ires=seqpos_start_; ires<= seqpos_stop_; ++ires ) {
		if (!pose.residue_type(ires).has("CA")) continue;
		for ( Size jres=1; jres<= pose.total_residue(); ++jres ) {
			if (jres >=seqpos_start_ && jres<= seqpos_stop_) continue;  // skip the chunk itself
			if (!pose.residue_type(jres).has("CA")) continue;
			numeric::xyzVector < core::Real > xyz_iatom (pose.residue(ires).xyz("CA"));
			numeric::xyzVector < core::Real > xyz_jatom (pose.residue(jres).xyz("CA"));
			// Squared-distance threshold 1e-4: atoms closer than 0.01 units are
			// treated as exactly overlapping.
			if (xyz_iatom.distance_squared(xyz_jatom) < 1e-4) {
				overlapped = true;
				break;
			}
		}
		if (overlapped) break;
	}
	if (overlapped) {
		// NOTE(review): lines are missing from this listing here — presumably the
		// declarations of the ids/positions vectors; confirm against the full
		// source.
		// Random translation; assuming RG.uniform() is in [0,1), each component
		// lies in [-1,1) — confirm against the RandomGenerator docs.
		numeric::xyzVector<core::Real> trans(2.*RG.uniform()-1.,
			2.*RG.uniform()-1.,
			2.*RG.uniform()-1.);

		// Shift every atom of every chunk residue by the same vector.
		for ( Size ires=seqpos_start_; ires<= seqpos_stop_; ++ires ) {
			for ( Size iatom=1; iatom<= pose.residue_type(ires).natoms(); ++iatom ) { // use residue_type to prevent internal coord update
				ids.push_back(core::id::AtomID(iatom,ires));
				positions.push_back( pose.residue(ires).xyz(iatom) + trans);
			}
		}
		pose.batch_set_xyz(ids,positions);
	}
}
334 
/// @brief Mover name reported to the moves::Mover framework.
/// NOTE(review): the signature line (presumably
/// std::string InsertChunkMover::get_name() const) is missing from this
/// listing — confirm against the full source.
	return "InsertChunkMover";
}
339 
340 
341 } // hybridization
342 } // protocols