< prev index next >

src/share/vm/code/compiledIC.cpp

Print this page




 270   return VtableStubs::is_entry_point(ic_destination());
 271 }
 272 
// Returns true when this inline cache currently dispatches to compiled code,
// i.e. its destination lies inside an nmethod. Caller must hold the
// CompiledIC_lock or be at a safepoint (checked by the assert below).
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // has been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
#ifdef ASSERT
  // NOTE(review): caller is dereferenced without a NULL check — presumably the
  // IC instruction always lives inside a code-cache blob; confirm.
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
 294 
 295 
 296 bool CompiledIC::is_call_to_interpreted() const {
 297   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
 298   // Call to interpreter if destination is either calling to a stub (if it
 299   // is optimized), or calling to an I2C blob
 300   bool is_call_to_interpreted = false;
 301   if (!is_optimized()) {
 302     // must use unsafe because the destination can be a zombie (and we're cleaning)
 303     // and the print_compiled_ic code wants to know if site (in the non-zombie)
 304     // is to the interpreter.
 305     CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
 306     is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
 307     assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
 308   } else {
 309     // Check if we are calling into our own codeblob (i.e., to a stub)
 310     CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
 311     address dest = ic_destination();
 312 #ifdef ASSERT
 313     {
 314       CodeBlob* db = CodeCache::find_blob_unsafe(dest);
 315       assert(!db->is_adapter_blob(), "must use stub!");
 316     }
 317 #endif /* ASSERT */
 318     is_call_to_interpreted = cb->contains(dest);
 319   }
 320   return is_call_to_interpreted;
 321 }
 322 
 323 
 324 void CompiledIC::set_to_clean() {
 325   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
 326   if (TraceInlineCacheClearing || TraceICs) {
 327     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
 328     print();
 329   }
 330 
 331   address entry;
 332   if (is_optimized()) {
 333     entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
 334   } else {
 335     entry = SharedRuntime::get_resolve_virtual_call_stub();
 336   }
 337 
 338   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
 339   // we only need to patch the destination
 340   bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
 341 
 342   if (safe_transition) {
 343     // Kill any leftover stub we might have too
 344     clear_ic_stub();
 345     if (is_optimized()) {
 346       set_ic_destination(entry);
 347     } else {
 348       set_ic_destination_and_value(entry, (void*)NULL);
 349     }
 350   } else {
 351     // Unsafe transition - create stub.
 352     InlineCacheBuffer::create_transition_stub(this, NULL, entry);
 353   }
 354   // We can't check this anymore. With lazy deopt we could have already
 355   // cleaned this IC entry before we even return. This is possible if
 356   // we ran out of space in the inline cache buffer trying to do the
 357   // set_next and we safepointed to free up space. This is a benign
 358   // race because the IC entry was complete when we safepointed so
 359   // cleaning it immediately is harmless.
 360   // assert(is_clean(), "sanity check");




 270   return VtableStubs::is_entry_point(ic_destination());
 271 }
 272 
 273 bool CompiledIC::is_call_to_compiled() const {
 274   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
 275 
 276   // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
 277   // method is guaranteed to still exist, since we only remove methods after all inline caches
 278   // has been cleaned up
 279   CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
 280   bool is_monomorphic = (cb != NULL && cb->is_nmethod());
 281   // Check that the cached_value is a klass for non-optimized monomorphic calls
 282   // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
 283   // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
 284 #ifdef ASSERT
 285   CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
 286   bool is_c1_method = caller->is_compiled_by_c1();
 287   assert( is_c1_method ||
 288          !is_monomorphic ||
 289          is_optimized() ||
 290          !caller->is_alive() ||
 291          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
 292 #endif // ASSERT
 293   return is_monomorphic;
 294 }
 295 
 296 
 297 bool CompiledIC::is_call_to_interpreted() const {
 298   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
 299   // Call to interpreter if destination is either calling to a stub (if it
 300   // is optimized), or calling to an I2C blob
 301   bool is_call_to_interpreted = false;
 302   if (!is_optimized()) {
 303     // must use unsafe because the destination can be a zombie (and we're cleaning)
 304     // and the print_compiled_ic code wants to know if site (in the non-zombie)
 305     // is to the interpreter.
 306     CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
 307     is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
 308     assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
 309   } else {
 310     // Check if we are calling into our own codeblob (i.e., to a stub)
 311     CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
 312     address dest = ic_destination();
 313 #ifdef ASSERT
 314     {
 315       CodeBlob* db = CodeCache::find_blob_unsafe(dest);
 316       assert(!db->is_adapter_blob(), "must use stub!");
 317     }
 318 #endif /* ASSERT */
 319     is_call_to_interpreted = cb->contains(dest);
 320   }
 321   return is_call_to_interpreted;
 322 }
 323 
 324 
 325 void CompiledIC::set_to_clean(bool in_use) {
 326   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
 327   if (TraceInlineCacheClearing || TraceICs) {
 328     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
 329     print();
 330   }
 331 
 332   address entry;
 333   if (is_optimized()) {
 334     entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
 335   } else {
 336     entry = SharedRuntime::get_resolve_virtual_call_stub();
 337   }
 338 
 339   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
 340   // we only need to patch the destination
 341   bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 342 
 343   if (safe_transition) {
 344     // Kill any leftover stub we might have too
 345     clear_ic_stub();
 346     if (is_optimized()) {
 347       set_ic_destination(entry);
 348     } else {
 349       set_ic_destination_and_value(entry, (void*)NULL);
 350     }
 351   } else {
 352     // Unsafe transition - create stub.
 353     InlineCacheBuffer::create_transition_stub(this, NULL, entry);
 354   }
 355   // We can't check this anymore. With lazy deopt we could have already
 356   // cleaned this IC entry before we even return. This is possible if
 357   // we ran out of space in the inline cache buffer trying to do the
 358   // set_next and we safepointed to free up space. This is a benign
 359   // race because the IC entry was complete when we safepointed so
 360   // cleaning it immediately is harmless.
 361   // assert(is_clean(), "sanity check");


< prev index next >