
src/hotspot/share/code/compiledIC.cpp

     assert(k->verify_itable_index(itable_index), "sanity check");
 #endif //ASSERT
     CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                     call_info->resolved_klass(), false);
     holder->claim();
-    InlineCacheBuffer::create_transition_stub(this, holder, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
+      delete holder;
+      return false;
+    }
   } else {
     assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
     // Can be different than selected_method->vtable_index(), due to package-private etc.
     int vtable_index = call_info->vtable_index();
     assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
     entry = VtableStubs::find_vtable_stub(vtable_index);
     if (entry == NULL) {
       return false;
     }
-    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
+      return false;
+    }
   }
 
   if (TraceICs) {
     ResourceMark rm;
     assert(!call_info->selected_method().is_null(), "Unexpected null selected method");

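Side note, not part of the webrev itself: the change above treats InlineCacheBuffer::create_transition_stub as fallible, so the call site releases what it claimed (the CompiledICHolder) before reporting failure. A standalone toy sketch of that claim/try/release shape follows; every name in it is a hypothetical stand-in, not a HotSpot symbol.

#include <cstdio>

struct Holder {};                       // stands in for CompiledICHolder

static int stub_space = 1;              // toy ICBuffer that holds a single stub

// Returns false when the (toy) stub buffer has no room left.
static bool create_transition_stub_toy() {
  if (stub_space == 0) return false;
  --stub_space;
  return true;
}

// Mirrors the call-site shape above: on failure, delete the claimed holder
// and propagate false; on success, ownership conceptually moves to the stub.
static bool set_to_megamorphic_toy(Holder* holder) {
  if (!create_transition_stub_toy()) {
    delete holder;
    return false;
  }
  return true;
}

int main() {
  bool first  = set_to_megamorphic_toy(new Holder());  // succeeds, consumes the one stub
  bool second = set_to_megamorphic_toy(new Holder());  // fails: buffer exhausted, holder freed
  std::printf("first=%d second=%d\n", first, second);
  return 0;
}
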
@@ -348,11 +353,11 @@
     is_call_to_interpreted = _call->is_call_to_interpreted(dest);
   }
   return is_call_to_interpreted;
 }

-void CompiledIC::set_to_clean(bool in_use) {
+bool CompiledIC::set_to_clean(bool in_use) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   if (TraceInlineCacheClearing || TraceICs) {
     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
     print();
   }
@@ -371,19 +376,22 @@
     } else {
       set_ic_destination_and_value(entry, (void*)NULL);
     }
   } else {
     // Unsafe transition - create stub.
-    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
+    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
+      return false;
+    }
   }
   // We can't check this anymore. With lazy deopt we could have already
   // cleaned this IC entry before we even return. This is possible if
   // we ran out of space in the inline cache buffer trying to do the
   // set_next and we safepointed to free up space. This is a benign
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_clean(), "sanity check");
+  return true;
 }

 bool CompiledIC::is_clean() const {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   bool is_clean = false;
@@ -391,11 +399,11 @@
   is_clean = dest == _call->get_resolve_call_stub(is_optimized());
   assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
   return is_clean;
 }

-void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
+bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
   assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
   // Updating a cache to the wrong entry can cause bugs that are very hard
   // to track down - if cache entry gets invalid - we just clean it. In
   // this way it is always the same code path that is responsible for
   // updating and resolving an inline cache
@@ -428,11 +436,15 @@
            (info.to_aot() ? "aot" : "interpreter"),
            method->print_value_string());
       }
     } else {
       // Call via method-klass-holder
-      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
+      CompiledICHolder* holder = info.claim_cached_icholder();
+      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
+        delete holder;
+        return false;
+      }
       if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
       }
     }
@@ -448,11 +460,13 @@
     // non-verified entry point
     bool safe = SafepointSynchronize::is_at_safepoint() ||
                 (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

     if (!safe) {
-      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
+      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
+        return false;
+      }
     } else {
       if (is_optimized()) {
         set_ic_destination(info.entry());
       } else {
         set_ic_destination_and_value(info.entry(), info.cached_metadata());
@@ -473,10 +487,11 @@
   // we ran out of space in the inline cache buffer trying to do the
   // set_next and we safepointed to free up space. This is a benign
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+  return true;
 }


 // is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
 // static_bound: The call can be static bound. If it isn't also optimized, the property
@@ -573,20 +588,21 @@
   }
 }

 // ----------------------------------------------------------------------------

-void CompiledStaticCall::set_to_clean(bool in_use) {
+bool CompiledStaticCall::set_to_clean(bool in_use) {
   // in_use is unused but needed to match template function in CompiledMethod
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
   // Reset call site
   MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
   set_destination_mt_safe(resolve_call_stub());

   // Do not reset stub here: It is too expensive to call find_stub.
   // Instead, rely on caller (nmethod::clear_inline_caches) to clear
   // both the call and its stub.
+  return true;
 }

 bool CompiledStaticCall::is_clean() const {
   return destination() == resolve_call_stub();
 }
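Closing note, also not part of the webrev: with set_to_clean(), set_to_monomorphic(), set_to_megamorphic() and CompiledStaticCall::set_to_clean() now returning bool, a caller can react to a full inline-cache buffer instead of assuming success. A hedged caller-side sketch, where refill_ic_stub_space() is a hypothetical placeholder for whatever actually frees ICBuffer space:

// Sketch only; refill_ic_stub_space() is hypothetical, not a HotSpot API.
static void clean_ic_site_with_retry(CompiledIC* ic, bool in_use) {
  while (!ic->set_to_clean(in_use)) {
    // No transition stub could be allocated; make room, then try again.
    refill_ic_stub_space();
  }
}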