89
90 unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
91 unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
92 unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
93 unsigned int markedForReclamation:1; // Used by NMethodSweeper
94
95 unsigned int has_unsafe_access:1; // May fault due to unsafe access.
96 unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
97
98 unsigned int speculatively_disconnected:1; // Marked for potential unload
99
100 void clear();
101 };
102
103
104 // An nmethod contains:
105 // - header (the nmethod structure)
106 // [Relocation]
107 // - relocation information
108 // - constant part (doubles, longs and floats used in nmethod)
109 // [Code]
110 // - code body
111 // - exception handler
112 // - stub code
113 // [Debugging information]
114 // - oop array
115 // - data array
116 // - pcs
117 // [Exception handler table]
118 // - handler entry point array
119 // [Implicit Null Pointer exception table]
120 // - implicit null table array
121
122 class Dependencies;
123 class ExceptionHandlerTable;
124 class ImplicitExceptionTable;
125 class AbstractCompiler;
126 class xmlStream;
127
128 class nmethod : public CodeBlob {
145 nmethod* volatile _oops_do_mark_link;
146
147 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
148
149 // Offsets for different nmethod parts
150 int _exception_offset;
151 // All deoptee's will resume execution at this location described by
152 // this offset.
153 int _deoptimize_offset;
154 // All deoptee's at a MethodHandle call site will resume execution
155 // at this location described by this offset.
156 int _deoptimize_mh_offset;
157 // Offset of the unwind handler if it exists
158 int _unwind_handler_offset;
159
160 #ifdef HAVE_DTRACE_H
161 int _trap_offset;
162 #endif // def HAVE_DTRACE_H
163 int _stub_offset;
164 int _consts_offset;
165 int _scopes_data_offset;
166 int _scopes_pcs_offset;
167 int _dependencies_offset;
168 int _handler_table_offset;
169 int _nul_chk_table_offset;
170 int _nmethod_end_offset;
171
172 // location in frame (offset for sp) that deopt can store the original
173 // pc during a deopt.
174 int _orig_pc_offset;
175
176 int _compile_id; // which compilation made this nmethod
177 int _comp_level; // compilation level
178
179 // offsets for entry points
180 address _entry_point; // entry point with class check
181 address _verified_entry_point; // entry point without class check
182 address _osr_entry_point; // entry point for on stack replacement
183
184 nmFlags flags; // various flags to keep track of nmethod state
331
332 // type info
333 bool is_nmethod() const { return true; }
334 bool is_java_method() const { return !method()->is_native(); }
335 bool is_native_method() const { return method()->is_native(); }
336 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
337
338 bool is_compiled_by_c1() const;
339 bool is_compiled_by_c2() const;
340
341 // boundaries for different parts
342 address code_begin () const { return _entry_point; }
343 address code_end () const { return header_begin() + _stub_offset ; }
344 address exception_begin () const { return header_begin() + _exception_offset ; }
345 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
346 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
347 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
348 address stub_begin () const { return header_begin() + _stub_offset ; }
349 address stub_end () const { return header_begin() + _consts_offset ; }
350 address consts_begin () const { return header_begin() + _consts_offset ; }
351 address consts_end () const { return header_begin() + _scopes_data_offset ; }
352 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
353 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
354 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
355 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
356 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
357 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
358 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
359 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
360 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
361 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
362
363 int code_size () const { return code_end () - code_begin (); }
364 int stub_size () const { return stub_end () - stub_begin (); }
365 int consts_size () const { return consts_end () - consts_begin (); }
366 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
367 int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
368 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
369 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
370 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
371
372 int total_size () const;
373
374 bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
375 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
376 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
377 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
378 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
379 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
380 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
381
382 // entry points
383 address entry_point() const { return _entry_point; } // normal entry point
384 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
385
386 // flag accessing and manipulation
387 bool is_in_use() const { return flags.state == alive; }
388 bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
389 bool is_not_entrant() const { return flags.state == not_entrant; }
390 bool is_zombie() const { return flags.state == zombie; }
391 bool is_unloaded() const { return flags.state == unloaded; }
392
393 // Make the nmethod non entrant. The nmethod will continue to be
394 // alive. It is used when an uncommon trap happens. Returns true
395 // if this thread changed the state of the nmethod or false if
396 // another thread performed the transition.
419 void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; }
420 void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }
421
422 bool has_unsafe_access() const { return flags.has_unsafe_access; }
423 void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
424
425 bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
426 void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
427
428 bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
429 void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
430
431 int level() const { return flags.level; }
432 void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
433
434 int comp_level() const { return _comp_level; }
435
436 int version() const { return flags.version; }
437 void set_version(int v);
438
439 // Non-perm oop support
440 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
441 protected:
442 enum { npl_on_list = 0x01, npl_marked = 0x10 };
443 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
444 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
445 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
446 #ifndef PRODUCT
447 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
448 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
449 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
450 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
451 #endif //PRODUCT
452 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
453 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
454
455 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
456 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
457
458 public:
499 public:
500 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
501 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
502 bool is_locked_by_vm() const { return _lock_count >0; }
503
504 // See comment at definition of _last_seen_on_stack
505 void mark_as_seen_on_stack();
506 bool can_not_entrant_be_converted();
507
508 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
509 void set_method(methodOop method) { _method = method; }
510
511 // GC support
512 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
513 bool unloading_occurred);
514 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
515 oop* root, bool unloading_occurred);
516
517 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
518 OopClosure* f);
519 virtual void oops_do(OopClosure* f) { oops_do(f, false); }
520 void oops_do(OopClosure* f, bool do_strong_roots_only);
521 bool detect_scavenge_root_oops();
522 void verify_scavenge_root_oops() PRODUCT_RETURN;
523
524 bool test_set_oops_do_mark();
525 static void oops_do_marking_prologue();
526 static void oops_do_marking_epilogue();
527 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
528 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
529
530 // ScopeDesc for an instruction
531 ScopeDesc* scope_desc_at(address pc);
532
533 private:
534 ScopeDesc* scope_desc_in(address begin, address end);
535
536 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
537
538 PcDesc* find_pc_desc_internal(address pc, bool approximate);
539
|
89
90 unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
91 unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
92 unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
93 unsigned int markedForReclamation:1; // Used by NMethodSweeper
94
95 unsigned int has_unsafe_access:1; // May fault due to unsafe access.
96 unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
97
98 unsigned int speculatively_disconnected:1; // Marked for potential unload
99
100 void clear();
101 };
102
103
104 // An nmethod contains:
105 // - header (the nmethod structure)
106 // [Relocation]
107 // - relocation information
108 // - constant part (doubles, longs and floats used in nmethod)
109 // - oop table
110 // [Code]
111 // - code body
112 // - exception handler
113 // - stub code
114 // [Debugging information]
115 // - oop array
116 // - data array
117 // - pcs
118 // [Exception handler table]
119 // - handler entry point array
120 // [Implicit Null Pointer exception table]
121 // - implicit null table array
122
123 class Dependencies;
124 class ExceptionHandlerTable;
125 class ImplicitExceptionTable;
126 class AbstractCompiler;
127 class xmlStream;
128
129 class nmethod : public CodeBlob {
146 nmethod* volatile _oops_do_mark_link;
147
148 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
149
150 // Offsets for different nmethod parts
151 int _exception_offset;
152 // All deoptee's will resume execution at this location described by
153 // this offset.
154 int _deoptimize_offset;
155 // All deoptee's at a MethodHandle call site will resume execution
156 // at this location described by this offset.
157 int _deoptimize_mh_offset;
158 // Offset of the unwind handler if it exists
159 int _unwind_handler_offset;
160
161 #ifdef HAVE_DTRACE_H
162 int _trap_offset;
163 #endif // def HAVE_DTRACE_H
164 int _stub_offset;
165 int _consts_offset;
166 int _oops_offset; // offset to where embedded oop table begins (inside data)
167 int _scopes_data_offset;
168 int _scopes_pcs_offset;
169 int _dependencies_offset;
170 int _handler_table_offset;
171 int _nul_chk_table_offset;
172 int _nmethod_end_offset;
173
174 // location in frame (offset for sp) that deopt can store the original
175 // pc during a deopt.
176 int _orig_pc_offset;
177
178 int _compile_id; // which compilation made this nmethod
179 int _comp_level; // compilation level
180
181 // offsets for entry points
182 address _entry_point; // entry point with class check
183 address _verified_entry_point; // entry point without class check
184 address _osr_entry_point; // entry point for on stack replacement
185
186 nmFlags flags; // various flags to keep track of nmethod state
333
334 // type info
335 bool is_nmethod() const { return true; }
336 bool is_java_method() const { return !method()->is_native(); }
337 bool is_native_method() const { return method()->is_native(); }
338 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
339
340 bool is_compiled_by_c1() const;
341 bool is_compiled_by_c2() const;
342
343 // boundaries for different parts
344 address code_begin () const { return _entry_point; }
345 address code_end () const { return header_begin() + _stub_offset ; }
346 address exception_begin () const { return header_begin() + _exception_offset ; }
347 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
348 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
349 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
350 address stub_begin () const { return header_begin() + _stub_offset ; }
351 address stub_end () const { return header_begin() + _consts_offset ; }
352 address consts_begin () const { return header_begin() + _consts_offset ; }
353 address consts_end () const { return header_begin() + _oops_offset ; }
354 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
355 oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
356
357 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
358 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
359 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
360 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
361 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
362 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
363 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
364 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
365 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
366 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
367
368 // Sizes
369 int code_size () const { return code_end () - code_begin (); }
370 int stub_size () const { return stub_end () - stub_begin (); }
371 int consts_size () const { return consts_end () - consts_begin (); }
372 int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
373 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
374 int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
375 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
376 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
377 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
378
379 int total_size () const;
380
381 // Containment
382 bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
383 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
384 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
385 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
386 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
387 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
388 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
389 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
390
391 // entry points
392 address entry_point() const { return _entry_point; } // normal entry point
393 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
394
395 // flag accessing and manipulation
396 bool is_in_use() const { return flags.state == alive; }
397 bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
398 bool is_not_entrant() const { return flags.state == not_entrant; }
399 bool is_zombie() const { return flags.state == zombie; }
400 bool is_unloaded() const { return flags.state == unloaded; }
401
402 // Make the nmethod non entrant. The nmethod will continue to be
403 // alive. It is used when an uncommon trap happens. Returns true
404 // if this thread changed the state of the nmethod or false if
405 // another thread performed the transition.
428 void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; }
429 void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }
430
431 bool has_unsafe_access() const { return flags.has_unsafe_access; }
432 void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
433
434 bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
435 void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
436
437 bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
438 void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
439
440 int level() const { return flags.level; }
441 void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
442
443 int comp_level() const { return _comp_level; }
444
445 int version() const { return flags.version; }
446 void set_version(int v);
447
448 // Support for oops in scopes and relocs:
449 // Note: index 0 is reserved for null.
450 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
451 oop* oop_addr_at(int index) const { // for GC
452 // relocation indexes are biased by 1 (because 0 is reserved)
453 assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
454 return &oops_begin()[index - 1];
455 }
456
457 void copy_oops(GrowableArray<jobject>* oops);
458
459 // Relocation support
460 private:
461 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
462 inline void initialize_immediate_oop(oop* dest, jobject handle);
463
464 public:
465 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
466 void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
467
468 bool is_at_poll_return(address pc);
469 bool is_at_poll_or_poll_return(address pc);
470
471 // Non-perm oop support
472 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
473 protected:
474 enum { npl_on_list = 0x01, npl_marked = 0x10 };
475 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
476 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
477 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
478 #ifndef PRODUCT
479 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
480 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
481 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
482 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
483 #endif //PRODUCT
484 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
485 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
486
487 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
488 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
489
490 public:
531 public:
532 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
533 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
534 bool is_locked_by_vm() const { return _lock_count >0; }
535
536 // See comment at definition of _last_seen_on_stack
537 void mark_as_seen_on_stack();
538 bool can_not_entrant_be_converted();
539
540 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
541 void set_method(methodOop method) { _method = method; }
542
543 // GC support
544 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
545 bool unloading_occurred);
546 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
547 oop* root, bool unloading_occurred);
548
549 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
550 OopClosure* f);
551 void oops_do(OopClosure* f) { oops_do(f, false); }
552 void oops_do(OopClosure* f, bool do_strong_roots_only);
553 bool detect_scavenge_root_oops();
554 void verify_scavenge_root_oops() PRODUCT_RETURN;
555
556 bool test_set_oops_do_mark();
557 static void oops_do_marking_prologue();
558 static void oops_do_marking_epilogue();
559 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
560 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
561
562 // ScopeDesc for an instruction
563 ScopeDesc* scope_desc_at(address pc);
564
565 private:
566 ScopeDesc* scope_desc_in(address begin, address end);
567
568 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
569
570 PcDesc* find_pc_desc_internal(address pc, bool approximate);
571
|