181 if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
182 blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
183 if( n->in(k) != copy ) break; // Failed for some cutout?
184 x = copy; // Progress, try again
185 }
186
187 // Phis and 2-address instructions cannot change registers so easily - their
188 // outputs must match their input.
189 if( !can_change_regs )
190 return blk_adjust; // Only check stupid copies!
191
192 // Loop backedges won't have a value-mapping yet
193 if( &value == NULL ) return blk_adjust;
194
195 // Skip through all copies to the _value_ being used. Do not change from
196 // int to pointer. This attempts to jump through a chain of copies, where
197 // intermediate copies might be illegal, i.e., value is stored down to stack
198 // then reloaded BUT survives in a register the whole way.
199 Node *val = skip_copies(n->in(k));
200
201 if( val == x ) return blk_adjust; // No progress?
202
203 bool single = is_single_register(val->ideal_reg());
204 uint val_idx = n2lidx(val);
205 OptoReg::Name val_reg = lrgs(val_idx).reg();
206
207 // See if it happens to already be in the correct register!
208 // (either Phi's direct register, or the common case of the name
209 // never-clobbered original-def register)
210 if( value[val_reg] == val &&
211 // Doubles check both halves
212 ( single || value[val_reg-1] == val ) ) {
213 blk_adjust += use_prior_register(n,k,regnd[val_reg],current_block,value,regnd);
214 if( n->in(k) == regnd[val_reg] ) // Success! Quit trying
215 return blk_adjust;
216 }
217
218 // See if we can skip the copy by changing registers. Don't change from
219 // using a register to using the stack unless we know we can remove a
220 // copy-load. Otherwise we might end up making a pile of Intel cisc-spill
|
181 if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
182 blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
183 if( n->in(k) != copy ) break; // Failed for some cutout?
184 x = copy; // Progress, try again
185 }
186
187 // Phis and 2-address instructions cannot change registers so easily - their
188 // outputs must match their input.
189 if( !can_change_regs )
190 return blk_adjust; // Only check stupid copies!
191
192 // Loop backedges won't have a value-mapping yet
193 if( &value == NULL ) return blk_adjust;
194
195 // Skip through all copies to the _value_ being used. Do not change from
196 // int to pointer. This attempts to jump through a chain of copies, where
197 // intermediate copies might be illegal, i.e., value is stored down to stack
198 // then reloaded BUT survives in a register the whole way.
199 Node *val = skip_copies(n->in(k));
200
201 if (val == x && nk_idx != 0 &&
202 regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
203 n2lidx(x) == n2lidx(regnd[nk_reg])) {
204   // When rematerializing nodes and stretching lifetimes, the
205 // allocator will reuse the original def for multidef LRG instead
206 // of the current reaching def because it can't know it's safe to
207 // do so. After allocation completes if they are in the same LRG
208 // then it should use the current reaching def instead.
209 n->set_req(k, regnd[nk_reg]);
210   blk_adjust += yank_if_dead(val, current_block, &value, &regnd);
211 val = skip_copies(n->in(k));
212 }
213
214 if( val == x ) return blk_adjust; // No progress?
215
216 bool single = is_single_register(val->ideal_reg());
217 uint val_idx = n2lidx(val);
218 OptoReg::Name val_reg = lrgs(val_idx).reg();
219
220 // See if it happens to already be in the correct register!
221 // (either Phi's direct register, or the common case of the name
222 // never-clobbered original-def register)
223 if( value[val_reg] == val &&
224 // Doubles check both halves
225 ( single || value[val_reg-1] == val ) ) {
226 blk_adjust += use_prior_register(n,k,regnd[val_reg],current_block,value,regnd);
227 if( n->in(k) == regnd[val_reg] ) // Success! Quit trying
228 return blk_adjust;
229 }
230
231 // See if we can skip the copy by changing registers. Don't change from
232 // using a register to using the stack unless we know we can remove a
233 // copy-load. Otherwise we might end up making a pile of Intel cisc-spill
|