/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  return
    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
    - (int)CodeHeap::header_size();
}
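
// Worked example, with illustrative values only (CodeEntryAlignment == 32 and
// CodeHeap::header_size() == 32 are assumptions, not the actual settings):
//   align_code_offset(40) == ((40 + 32 + 31) & ~31) - 32 == 64
// i.e. the offset is rounded up so that header_size + offset is a multiple of
// CodeEntryAlignment, keeping the code start entry-aligned within the CodeHeap.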


// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}
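
// The size computed above corresponds to a blob laid out roughly as
//   [ header | relocation | pad to CodeEntryAlignment | content (code) | oops | metadata ]
// with the individual region sizes rounded up to oopSize; this is intended to
// match the CodeBlobLayout used by the CodeBlob constructors below.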

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _strings(CodeStrings()),
  _oop_maps(oop_maps),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _content_begin(layout.content_begin()),
  _type(type)
{
  assert(is_aligned(layout.size(),            oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(),     oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _strings(CodeStrings()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _content_begin(layout.content_begin()),
  _type(type)
{
  assert(is_aligned(_size,        oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copies the code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  cb->copy_code_and_locs_to(this);
}

void CodeBlob::flush() {
  if (_oop_maps) {
    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
    _oop_maps = NULL;
  }
  _strings.free();
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory; it's your job to free it.
  if (p != NULL) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = NULL;
  }
}
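
// Note: the ImmutableOopMapSet built by set_oop_maps() lives in the C heap;
// it is released again in CodeBlob::flush() via FREE_C_HEAP_ARRAY (see above).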


void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end());
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
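
// Typical use (see RuntimeStub::new_runtime_stub() below):
//   trace_new_stub(stub, "RuntimeStub - ", stub_name);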

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(_oop_maps != NULL, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
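
// Note: CodeCache::allocate() may return NULL when the NonNMethod code heap is
// full, so BufferBlob::create() can return NULL as well; callers such as
// MethodHandlesAdapterBlob::create() below check for this explicitly.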

void BufferBlob::free(BufferBlob *blob) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((RuntimeBlob*)blob);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) VtableBlob(name, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == NULL) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls   = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        unpack_offset,
  int        unpack_with_exception_offset,
  int        unpack_with_reexecution_offset,
  int        frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != NULL) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    //
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != NULL) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != NULL) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    nm->print_nmethod(verbose);
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}