src/share/vm/runtime/sharedRuntime.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 6921799 Sdiff src/share/vm/runtime

src/share/vm/runtime/sharedRuntime.cpp

Print this page




1334 
1335 
1336 #ifndef PRODUCT
1337   Atomic::inc(&_wrong_method_ctr);
1338 
1339   if (TraceCallFixup) {
1340     ResourceMark rm(thread);
1341     tty->print("handle_wrong_method reresolving call to");
1342     callee_method->print_short_name(tty);
1343     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1344   }
1345 #endif
1346 
1347   return callee_method;
1348 }
1349 
1350 // ---------------------------------------------------------------------------
1351 // We are calling the interpreter via a c2i. Normally this would mean that
1352 // we were called by a compiled method. However we could have lost a race
1353 // where we went int -> i2c -> c2i and so the caller could in fact be
1354 // interpreted. If the caller is compiled we attampt to patch the caller
1355 // so he no longer calls into the interpreter.
1356 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
1357   methodOop moop(method);
1358 
1359   address entry_point = moop->from_compiled_entry();
1360 
1361   // It's possible that deoptimization can occur at a call site which hasn't
1362   // been resolved yet, in which case this function will be called from
1363   // an nmethod that has been patched for deopt and we can ignore the
1364   // request for a fixup.
1365   // Also it is possible that we lost a race in that from_compiled_entry
1366 // is now back to the i2c; in that case we don't need to patch, and if
1367   // we did we'd leap into space because the callsite needs to use
1368   // "to interpreter" stub in order to load up the methodOop. Don't
1369   // ask me how I know this...
1370   //
1371 
1372   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1373   if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {










1374     return;
1375   }
1376 
1377   // There is a benign race here. We could be attempting to patch to a compiled
1378   // entry point at the same time the callee is being deoptimized. If that is
1379   // the case then entry_point may in fact point to a c2i and we'd patch the
1380   // call site with the same old data. clear_code will set code() to NULL
1381   // at the end of it. If we happen to see that NULL then we can skip trying
1382   // to patch. If we hit the window where the callee has a c2i in the
1383   // from_compiled_entry and the NULL isn't present yet then we lose the race
1384   // and patch the code with the same old data. Asi es la vida.
1385 
1386   if (moop->code() == NULL) return;
1387 
1388   if (((nmethod*)cb)->is_in_use()) {
1389 
1390     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1391     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1392     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
1393       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
1394       //
1395       // bug 6281185. We might get here after resolving a call site to a vanilla
1396       // virtual call. Because the resolvee uses the verified entry it may then
1397       // see compiled code and attempt to patch the site by calling us. This would
1398 // then incorrectly convert the call site to optimized and it's downhill from
1399       // there. If you're lucky you'll get the assert in the bugid, if not you've
1400       // just made a call site that could be megamorphic into a monomorphic site
1401       // for the rest of its life! Just another racing bug in the life of
1402       // fixup_callers_callsite ...
1403       //
1404       RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
1405       iter.next();
1406       assert(iter.has_current(), "must have a reloc at java call site");
1407       relocInfo::relocType typ = iter.reloc()->type();
1408       if ( typ != relocInfo::static_call_type &&




1334 
1335 
1336 #ifndef PRODUCT
1337   Atomic::inc(&_wrong_method_ctr);
1338 
1339   if (TraceCallFixup) {
1340     ResourceMark rm(thread);
1341     tty->print("handle_wrong_method reresolving call to");
1342     callee_method->print_short_name(tty);
1343     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1344   }
1345 #endif
1346 
1347   return callee_method;
1348 }
1349 
1350 // ---------------------------------------------------------------------------
1351 // We are calling the interpreter via a c2i. Normally this would mean that
1352 // we were called by a compiled method. However we could have lost a race
1353 // where we went int -> i2c -> c2i and so the caller could in fact be
1354 // interpreted. If the caller is compiled we attempt to patch the caller
1355 // so he no longer calls into the interpreter.
1356 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
1357   methodOop moop(method);
1358 
1359   address entry_point = moop->from_compiled_entry();
1360 
1361   // It's possible that deoptimization can occur at a call site which hasn't
1362   // been resolved yet, in which case this function will be called from
1363   // an nmethod that has been patched for deopt and we can ignore the
1364   // request for a fixup.
1365   // Also it is possible that we lost a race in that from_compiled_entry
1366 // is now back to the i2c; in that case we don't need to patch, and if
1367   // we did we'd leap into space because the callsite needs to use
1368   // "to interpreter" stub in order to load up the methodOop. Don't
1369   // ask me how I know this...

1370 
1371   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1372   if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
1373     return;
1374   }
1375 
1376   // The check above makes sure this is a nmethod.
1377   nmethod* nm = cb->as_nmethod_or_null();
1378   assert(nm, "must be");
1379 
1380   // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
1381   // to implement MethodHandle actions.
1382   if (nm->is_method_handle_return(caller_pc)) {
1383     return;
1384   }
1385 
1386   // There is a benign race here. We could be attempting to patch to a compiled
1387   // entry point at the same time the callee is being deoptimized. If that is
1388   // the case then entry_point may in fact point to a c2i and we'd patch the
1389   // call site with the same old data. clear_code will set code() to NULL
1390   // at the end of it. If we happen to see that NULL then we can skip trying
1391   // to patch. If we hit the window where the callee has a c2i in the
1392   // from_compiled_entry and the NULL isn't present yet then we lose the race
1393   // and patch the code with the same old data. Asi es la vida.
1394 
1395   if (moop->code() == NULL) return;
1396 
1397   if (nm->is_in_use()) {
1398 
1399     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1400     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1401     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
1402       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
1403       //
1404       // bug 6281185. We might get here after resolving a call site to a vanilla
1405       // virtual call. Because the resolvee uses the verified entry it may then
1406       // see compiled code and attempt to patch the site by calling us. This would
1407 // then incorrectly convert the call site to optimized and it's downhill from
1408       // there. If you're lucky you'll get the assert in the bugid, if not you've
1409       // just made a call site that could be megamorphic into a monomorphic site
1410       // for the rest of its life! Just another racing bug in the life of
1411       // fixup_callers_callsite ...
1412       //
1413       RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
1414       iter.next();
1415       assert(iter.has_current(), "must have a reloc at java call site");
1416       relocInfo::relocType typ = iter.reloc()->type();
1417       if ( typ != relocInfo::static_call_type &&


src/share/vm/runtime/sharedRuntime.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File