254
255 if( !nm->is_marked_for_deoptimization() )
256 return false;
257
258 // If at the return point, then the frame has already been popped, and
259 // only the return needs to be executed. Don't deoptimize here.
260 return !nm->is_at_poll_return(pc());
261 }
262
263 bool frame::can_be_deoptimized() const {
264   // Only frames backed by compiled code are candidates for deopt.
265   if (!is_compiled_frame()) {
266     return false;
267   }
268   CompiledMethod* cm = (CompiledMethod*)_cb;
269   if (!cm->can_be_deoptimized()) {
270     return false;
271   }
272   // A frame stopped at the return poll has already been logically
273   // popped; only the return itself remains, so don't deoptimize it.
274   return !cm->is_at_poll_return(pc());
275 }
272
273 void frame::deoptimize(JavaThread* thread) {
274 // Schedule deoptimization of an nmethod activation with this frame.
275 assert(_cb != NULL && _cb->is_compiled(), "must be");
276
277 // This is a fix for register window patching race
278 if (NeedsDeoptSuspend && Thread::current() != thread) {
279 assert(SafepointSynchronize::is_at_safepoint(),
280 "patching other threads for deopt may only occur at a safepoint");
281
282 // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
283 // we could see the frame again and ask for it to be deoptimized since
284 // it might move for a long time. That is harmless and we just ignore it.
285 if (id() == thread->must_deopt_id()) {
286 assert(thread->is_deopt_suspend(), "lost suspension");
287 return;
288 }
289
290 // We are at a safepoint so the target thread can only be
291 // in 4 states:
292 // blocked - no problem
293 // blocked_trans - no problem (i.e. could have woken up from blocked
294 // during a safepoint).
295 // native - register window pc patching race
296 // native_trans - momentary state
297 //
298 // We could just wait out a thread in native_trans to block.
299 // Then we'd have all the issues that the safepoint code has as to
300 // whether to spin or block. It isn't worth it. Just treat it like
301 // native and be done with it.
302 //
303 // Examine the state of the thread at the start of safepoint since
304 // threads that were in native at the start of the safepoint could
305 // come to a halt during the safepoint, changing the current value
306 // of the safepoint_state.
307 JavaThreadState state = thread->safepoint_state()->orig_thread_state();
308 if (state == _thread_in_native || state == _thread_in_native_trans) {
309 // Since we are at a safepoint the target thread will stop itself
310 // before it can return to java as long as we remain at the safepoint.
311 // Therefore we can put an additional request for the thread to stop
312     // no matter what now (just like a suspend). This will cause the thread
313 // to notice it needs to do the deopt on its own once it leaves native.
314 //
315 // The only reason we must do this is because on machine with register
316 // windows we have a race with patching the return address and the
317 // window coming live as the thread returns to the Java code (but still
318 // in native mode) and then blocks. It is only this top most frame
319 // that is at risk. So in truth we could add an additional check to
320 // see if this frame is one that is at risk.
321 RegisterMap map(thread, false);
322 frame at_risk = thread->last_frame().sender(&map);
323 if (id() == at_risk.id()) {
324 thread->set_must_deopt_id(id());
325 thread->set_deopt_suspend();
326 return;
327 }
328 }
329 } // NeedsDeoptSuspend
330
331
332 // If the call site is a MethodHandle call site use the MH deopt
333 // handler.
334 CompiledMethod* cm = (CompiledMethod*) _cb;
335 address deopt = cm->is_method_handle_return(pc()) ?
336 cm->deopt_mh_handler_begin() :
337 cm->deopt_handler_begin();
338
339 // Save the original pc before we patch in the new one
340 cm->set_original_pc(this, pc());
341 patch_pc(thread, deopt);
342
343 #ifdef ASSERT
344 {
345 RegisterMap map(thread, false);
346 frame check = thread->last_frame();
347 while (id() != check.id()) {
348 check = check.sender(&map);
349 }
350 assert(check.is_deoptimized_frame(), "missed deopt");
|
254
255 if( !nm->is_marked_for_deoptimization() )
256 return false;
257
258 // If at the return point, then the frame has already been popped, and
259 // only the return needs to be executed. Don't deoptimize here.
260 return !nm->is_at_poll_return(pc());
261 }
262
263 bool frame::can_be_deoptimized() const {
264   if (!is_compiled_frame()) return false;
265   CompiledMethod* method = (CompiledMethod*)_cb;
266   // Deoptimizable only when the compiled method allows it and the frame
267   // is not sitting at the return poll (already popped there; only the
268   // return instruction is left to execute).
269   return method->can_be_deoptimized() && !method->is_at_poll_return(pc());
270 }
272
273 void frame::deoptimize(JavaThread* thread) {
274 assert(thread->frame_anchor()->has_last_Java_frame() &&
275 thread->frame_anchor()->walkable(), "must be");
276 // Schedule deoptimization of an nmethod activation with this frame.
277 assert(_cb != NULL && _cb->is_compiled(), "must be");
278
279 // If the call site is a MethodHandle call site use the MH deopt
280 // handler.
281 CompiledMethod* cm = (CompiledMethod*) _cb;
282 address deopt = cm->is_method_handle_return(pc()) ?
283 cm->deopt_mh_handler_begin() :
284 cm->deopt_handler_begin();
285
286 // Save the original pc before we patch in the new one
287 cm->set_original_pc(this, pc());
288 patch_pc(thread, deopt);
289
290 #ifdef ASSERT
291 {
292 RegisterMap map(thread, false);
293 frame check = thread->last_frame();
294 while (id() != check.id()) {
295 check = check.sender(&map);
296 }
297 assert(check.is_deoptimized_frame(), "missed deopt");
|