/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*          exception_type()             { return _exception_type; }
  ExceptionCache* next()                       { return _next; }
  void            set_next(ExceptionCache* ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
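
// A minimal sketch (not HotSpot code) of the lookup-then-insert pattern these
// caches support. handler_for_exception_and_pc() and
// add_handler_for_exception_and_pc() are declared on nmethod below;
// resolve_handler_slowly() is a hypothetical stand-in for the full search of
// the exception handler table:
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = resolve_handler_slowly(nm, exception, pc);  // slow path
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }
//   // a later throw of the same exception type at the same pc hits the cache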

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
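//
// The parts above are laid out contiguously and located via byte offsets from
// header_begin(); the real accessors appear further down in this class. A
// hedged sketch of the resulting arithmetic ('some_pc' is illustrative only):
//
//   address code = nm->insts_begin();            // header_begin() + code_offset()
//   address stub = nm->stub_begin();             // header_begin() + _stub_offset
//   bool in_body = nm->insts_contains(some_pc);  // code <= some_pc < insts_end()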

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:
  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;             // from InstanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link;   // from CodeCache::scavenge_root_nmethods
  nmethod*  _saved_nmethod_link;   // from CodeCache::speculatively_disconnect

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler;     // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;            // entry point with class check
  address _verified_entry_point;   // entry point without class check
  address _osr_entry_point;        // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                // offset to where embedded oop table begins (inside data)
  int _metadata_offset;            // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                  // which compilation made this nmethod
  int _comp_level;                  // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;   // Used for maintenance of dependencies (CodeCache_lock)
  bool _speculatively_disconnected; // Marked for potential unload

  bool _marked_for_reclamation;     // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;  // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  enum { alive        = 0,
         not_entrant  = 1, // uncommon trap has happened but activations may still exist
         zombie       = 2,
         unloaded     = 3 };
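
  // A descriptive sketch (not code) of the legal state transitions, driven by
  // make_not_entrant()/make_zombie()/make_unloaded() declared below; the
  // sweeping logic itself lives in NMethodSweeper:
  //
  //   alive --make_not_entrant()--> not_entrant   (no new calls; activations may remain)
  //   not_entrant --no activations left on any stack--> zombie
  //   alive/not_entrant --make_unloaded()--> unloaded  (a referenced class was unloaded)
  //   zombie --flush()--> memory returned to the code cache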

  jbyte _scavenge_root_state;

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long  _stack_traversal_mark;

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned,
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int     trap_offset() const  { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H
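
  // A hedged sketch of how a compiler hands a finished compile to the factory
  // above (the real call sites live in the compiler broker; the local names
  // here are illustrative only):
  //
  //   nmethod* nm = nmethod::new_nmethod(mh, compile_id, entry_bci, &offsets,
  //                                      orig_pc_offset, recorder, deps,
  //                                      &code_buffer, frame_size, oop_maps,
  //                                      &handler_table, &nul_chk_table,
  //                                      compiler, comp_level);
  //   if (nm == NULL) { /* allocation failed; the compile is discarded */ }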

  // accessors
  Method* method() const            { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

  // type info
  bool is_nmethod() const       { return true; }
  bool is_java_method() const   { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const    { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset; }
  address consts_end            () const { return header_begin() + code_offset(); }
  address insts_begin           () const { return header_begin() + code_offset(); }
  address insts_end             () const { return header_begin() + _stub_offset; }
  address stub_begin            () const { return header_begin() + _stub_offset; }
  address stub_end              () const { return header_begin() + _oops_offset; }
  address exception_begin       () const { return header_begin() + _exception_offset; }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset; }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*)(header_begin() + _oops_offset); }
  oop*    oops_end              () const { return (oop*)(header_begin() + _metadata_offset); }

  Metadata** metadata_begin     () const { return (Metadata**)(header_begin() + _metadata_offset); }
  Metadata** metadata_end       () const { return (Metadata**)(header_begin() + _scopes_data_offset); }

  address scopes_data_begin     () const { return header_begin() + _scopes_data_offset; }
  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset; }
  address dependencies_end      () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset; }

  // Sizes
  int consts_size       () const { return consts_end       () - consts_begin       (); }
  int insts_size        () const { return insts_end        () - insts_begin        (); }
  int stub_size         () const { return stub_end         () - stub_begin         (); }
  int oops_size         () const { return (address)oops_end    () - (address)oops_begin    (); }
  int metadata_size     () const { return (address)metadata_end() - (address)metadata_begin(); }
  int scopes_data_size  () const { return scopes_data_end  () - scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t)scopes_pcs_end() - (intptr_t)scopes_pcs_begin(); }
  int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size        () const;

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
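
  // Because consecutive parts share a boundary offset, the parts tile the
  // blob with no gaps; a sketch of invariants that follow directly from the
  // accessors above (illustrative, not asserted anywhere in this class):
  //
  //   assert(nm->insts_end() == nm->stub_begin(), "parts are contiguous");
  //   assert(nm->insts_size() + nm->stub_size() ==
  //          nm->stub_end() - nm->insts_begin(), "sizes are bound differences");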

  // flag accessing and manipulation
  bool  is_in_use() const      { return _state == alive; }
  bool  is_alive() const       { return _state == alive || _state == not_entrant; }
  bool  is_not_entrant() const { return _state == not_entrant; }
  bool  is_zombie() const      { return _state == zombie; }
  bool  is_unloaded() const    { return _state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()     { return _unload_reported; }
  void  set_unload_reported() { _unload_reported = true; }

  bool  is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void  mark_for_deoptimization()            { _marked_for_deoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()             { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()     { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool  is_marked_for_reclamation() const { return _marked_for_reclamation; }
  void  mark_for_reclamation()            { _marked_for_reclamation = 1; }

  bool  has_unsafe_access() const     { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool  is_speculatively_disconnected() const  { return _speculatively_disconnected; }
  void  set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }

  bool  is_lazy_critical_native() const  { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z) { _lazy_critical_native = z; }

  bool  has_wide_vectors() const     { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z) { _has_wide_vectors = z; }

  int   comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
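
  // Sketch of the 1-biased indexing above: index 0 encodes NULL, so table
  // slot i is addressed with index i + 1 (illustrative use, not HotSpot code):
  //
  //   oop first = nm->oop_at(1);  // == oops_begin()[0]
  //   oop none  = nm->oop_at(0);  // always NULL, no table access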

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool  on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void  set_on_scavenge_root_list()   { _scavenge_root_state = sl_on_list; }
  void  clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()   { _scavenge_root_state |= sl_marked; }
  void  clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool  scavenge_root_not_marked()   { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const         { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

  nmethod* saved_nmethod_link() const         { return _saved_nmethod_link; }
  void     set_saved_nmethod_link(nmethod* n) { _saved_nmethod_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()           { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int     osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const         { return _osr_link; }
  void     set_osr_link(nmethod* n) { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is
  // not expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See the comment at the definition of _stack_traversal_mark above
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
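
  // find_pc_desc() above consults the single most recently returned PcDesc
  // (PcDescCache::last_pc_desc()) before falling back to the full search in
  // find_pc_desc_internal(); a hedged sketch of a typical query:
  //
  //   PcDesc* pd = nm->pc_desc_at(pc);             // exact pc match required
  //   if (pd == NULL) pd = nm->pc_desc_near(pc);   // first PcDesc at or after pc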

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one you would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before the frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);
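
  // A hedged sketch of how these are used during stack walking ('fr' is a
  // hypothetical frame whose pc lies in this nmethod): a frame under
  // deoptimization has had its return pc patched to the deopt handler, and
  // the real pc was stashed in the frame at _orig_pc_offset.
  //
  //   if (nm->is_deopt_pc(fr.pc())) {
  //     address original = nm->get_original_pc(&fr);  // pc before patching
  //   }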

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_code();
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*  IC_at(char* p) const;
  // PrimitiveIC* primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // the methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

  // RedefineClasses support. Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);  // note: this works even if _nm == new_nm
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
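
// A hedged sketch of typical RAII use: pin an nmethod so the sweeper cannot
// flush or zombify it while it is being examined ('nm' is illustrative):
//
//   {
//     nmethodLocker nml(nm);  // constructor locks
//     // ... inspect nm's code and debug info safely ...
//   }                         // destructor unlocks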

#endif // SHARE_VM_CODE_NMETHOD_HPP