/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class DirectiveSet;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass* exception_type()                { return _exception_type; }
  ExceptionCache* next()                 { return _next; }
  void set_next(ExceptionCache* ec)      { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; without volatile, a C++
  // compiler (e.g. xlC 12) may duplicate the field accesses, and
  // find_pc_desc_internal has been observed to return wrong results
  // as a consequence.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
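
// Illustrative sketch (an assumption about typical use; the nmethod
// accessors referenced here are declared later in this file): exception
// dispatch probes the per-nmethod ExceptionCache chain before doing a
// full handler-table search, then memoizes the result for next time.
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     // slow path: compute the handler via the exception handler table...
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }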

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class DepChange;
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;   // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;  // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // Needed to keep nmethods alive that are not the default nmethod for the associated Method.
  oop _jvmci_installed_code;
  oop _speculation_log;
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link;  // from InstanceKlass::osr_nmethods_head

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link;  // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler;  // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;           // entry point with class check
  address _verified_entry_point;  // entry point without class check
  address _osr_entry_point;       // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler, if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;      // offset to where embedded oop table begins (inside data)
  int _metadata_offset;  // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;
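
  // Illustrative note (derived from the accessors declared further down,
  // not text from the original header): these offsets partition a single
  // contiguous blob, so each part ends exactly where the next begins, e.g.
  //
  //   oops_begin()     == (oop*)(header_begin() + _oops_offset)
  //   oops_end()       == (oop*)(header_begin() + _metadata_offset)
  //   metadata_begin() == (Metadata**)(header_begin() + _metadata_offset)
  //
  // and each part's size is just the difference of two consecutive offsets.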

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;  // which compilation made this nmethod
  int _comp_level;  // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;  // Used for maintenance of dependencies (CodeCache_lock)

  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status;  // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;          // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1;  // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;       // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;           // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;  // {in_use, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock;  // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value, the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
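
  // Worked example (illustrative; the code cache size is an arbitrary
  // value chosen here): with ReservedCodeCacheSize = 240M, a method seen
  // on a stack during scanning gets
  //
  //   _hotness_counter = (240M / (1024 * 1024)) * 2 = 480
  //
  // and the sweeper then decrements it by 1 per sweep, so roughly 480
  // sweeps without any stack activity must pass before the counter drains
  // and the method starts to look cold.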

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , Handle installed_code,
          Handle speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , Handle installed_code = Handle(),
                              Handle speculation_log = Handle()
#endif
                              );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);
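
  // Illustrative note (an assumption about call sites, not text from the
  // original header): new_nmethod() is the registration path for JIT output
  // (reached, for example, from ciEnv::register_method after a C1/C2
  // compile), while new_native_nmethod() wraps generated native/JNI wrapper
  // code. Both allocate space in the code cache and run the matching
  // private constructor declared above.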

  // accessors
  Method* method() const            { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

  // type info
  bool is_nmethod() const           { return true; }
  bool is_java_method() const       { return !method()->is_native(); }
  bool is_native_method() const     { return method()->is_native(); }
  bool is_osr_method() const        { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_jvmci() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset; }
  address consts_end            () const { return header_begin() + code_offset(); }
  address insts_begin           () const { return header_begin() + code_offset(); }
  address insts_end             () const { return header_begin() + _stub_offset; }
  address stub_begin            () const { return header_begin() + _stub_offset; }
  address stub_end              () const { return header_begin() + _oops_offset; }
  address exception_begin       () const { return header_begin() + _exception_offset; }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset; }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end              () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin     () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end       () const { return (Metadata**) (header_begin() + _scopes_data_offset); }

  address scopes_data_begin     () const { return header_begin() + _scopes_data_offset; }
  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset; }
  address dependencies_end      () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset; }

  // Sizes
  int consts_size       () const { return consts_end       () - consts_begin       (); }
  int insts_size        () const { return insts_end        () - insts_begin        (); }
  int stub_size         () const { return stub_end         () - stub_begin         (); }
  int oops_size         () const { return (address) oops_end    () - (address) oops_begin    (); }
  int metadata_size     () const { return (address) metadata_end() - (address) metadata_begin(); }
  int scopes_data_size  () const { return scopes_data_end  () - scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }
  int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int oops_count() const     { assert(oops_size() % oopSize == 0, "");      return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;
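
  // Worked example (illustrative): an nmethod embedding 5 oops has
  // oops_size() == 5 * oopSize, so oops_count() == 5 + 1 == 6; the extra
  // slot reflects index 0 being reserved for null in the biased indexing
  // used by oop_at()/oop_addr_at() below.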

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr)    const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr)    const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr)    const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop* addr)       const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr)    const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr)    const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr)    const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr)    const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  enum { in_use      = 0,  // executable nmethod
         not_entrant = 1,  // marked for deoptimization but activations may still exist,
                           // will be transformed to zombie when all activations are gone
         zombie      = 2,  // no activations exist, nmethod is ready for purge
         unloaded    = 3   // there should be no activations, should not be called,
                           // will be transformed to zombie immediately
  };

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == in_use; }
  bool is_alive() const       { return _state == in_use || _state == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }

  // returns a string version of the nmethod state
  const char* state() const {
    switch (_state) {
      case in_use:      return "in use";
      case not_entrant: return "not_entrant";
      case zombie:      return "zombie";
      case unloaded:    return "unloaded";
      default:
        fatal("unexpected nmethod state: %d", _state);
        return NULL;
    }
  }
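
  // Lifecycle sketch (an inference from the enum comments above, not text
  // from the original header); transitions only move forward:
  //
  //   in_use --make_not_entrant()--> not_entrant
  //   not_entrant --(no activations left, sweeper)--> zombie --> flushed
  //   in_use/not_entrant --(GC finds dead oops)--> unloaded --> zombie
  //
  // make_not_entrant_or_zombie() performs the forward transitions and
  // reports whether the calling thread won the race to change _state.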

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_zombie() { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  void set_unloading_next(nmethod* next) { _unloading_next = next; }
  nmethod* unloading_next()              { return _unloading_next; }

  static unsigned char global_unloading_clock() { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
  void mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool has_unsafe_access() const             { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z)         { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const       { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z)      { _lazy_critical_native = z; }

  bool has_wide_vectors() const              { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z)          { _has_wide_vectors = z; }

  int comp_level() const                     { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
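
  // Example (illustrative): because index 0 is reserved for null,
  // oop_at(0) returns NULL without touching the table, oop_at(1) reads
  // oops_begin()[0], oop_at(2) reads oops_begin()[1], and so on. The
  // metadata accessors below use the same biased indexing.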

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = sl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |= sl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif // PRODUCT
  nmethod* scavenge_root_link() const     { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }
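
  // Illustrative sketch (an inference from the fields above, not text from
  // the original header): OSR nmethods for a class hang off
  // InstanceKlass::osr_nmethods_head and are chained through _osr_link, so
  // a lookup for a particular bytecode index walks the list roughly like:
  //
  //   for (nmethod* nm = ik->osr_nmethods_head(); nm != NULL; nm = nm->osr_link()) {
  //     if (nm->osr_entry_bci() == bci) return nm;  // simplified match test
  //   }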

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void clear_ic_stubs();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See the comment at the definition of _stack_traversal_mark.
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  oop jvmci_installed_code() { return _jvmci_installed_code; }
  char* jvmci_installed_code_name(char* buf, size_t buflen);

  // Update the state of any InstalledCode instance associated with
  // this nmethod based on the current value of _state.
  void maybe_invalidate_installed_code();

  // Helper function to invalidate InstalledCode instances
  static void invalidate_installed_code(Handle installed_code, TRAPS);

  oop speculation_log() { return _speculation_log; }

 private:
  void clear_jvmci_installed_code();

 public:
#endif

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

 private:
  // Unload an nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
  bool unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);

 public:
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
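
  // Illustrative sketch (an assumption about typical use, based on the
  // prologue/epilogue pairing above; not text from the original header):
  //
  //   nmethod::oops_do_marking_prologue();
  //   // ... for each nmethod nm reached by the GC root scan:
  //   nm->oops_do(&closure);               // visits embedded oops; the first
  //                                        // visitor claims nm via
  //                                        // test_set_oops_do_mark()
  //   nmethod::oops_do_marking_epilogue(); // clears the mark list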

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_recorded_oops()                  PRODUCT_RETURN;
  void print_recorded_metadata()              PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  // RedefineClasses support. Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);  // note: This works even if _nm == new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
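
// Illustrative usage sketch (an assumption about typical use, not text from
// the original header): RAII locking so the code cannot be flushed or
// zombified while another thread inspects it.
//
//   {
//     nmethodLocker nml(nm);  // lock_nmethod(); nm may be NULL, which is tolerated
//     // ... nm's code stays valid here, even if it becomes not_entrant ...
//   }                         // destructor runs unlock_nmethod()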

#endif // SHARE_VM_CODE_NMETHOD_HPP