109 class ExceptionHandlerTable;
110 class ImplicitExceptionTable;
111 class AbstractCompiler;
112 class xmlStream;
113
114 class nmethod : public CodeBlob {
115 friend class VMStructs;
116 friend class NMethodSweeper;
117 friend class CodeCache; // scavengable oops
118 private:
119
120 // GC support to help figure out if an nmethod has been
121 // cleaned/unloaded by the current GC.
122 static unsigned char _global_unloading_clock;
123
124 // Shared fields for all nmethods
125 Method* _method;
126 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
127 jmethodID _jmethod_id; // Cache of method()->jmethod_id()
128
129 // To support simple linked-list chaining of nmethods:
130 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
131
132 union {
133 // Used by G1 to chain nmethods.
134 nmethod* _unloading_next;
135 // Used by non-G1 GCs to chain nmethods.
136 nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
137 };
138
139 static nmethod* volatile _oops_do_mark_nmethods;
140 nmethod* volatile _oops_do_mark_link;
141
142 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
143
144 // offsets for entry points
145 address _entry_point; // entry point with class check
146 address _verified_entry_point; // entry point without class check
147 address _osr_entry_point; // entry point for on stack replacement
148
256 int frame_size,
257 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
258 ByteSize basic_lock_sp_offset, /* synchronized natives only */
259 OopMapSet* oop_maps);
260
261 // Creation support
262 nmethod(Method* method,
263 int nmethod_size,
264 int compile_id,
265 int entry_bci,
266 CodeOffsets* offsets,
267 int orig_pc_offset,
268 DebugInformationRecorder *recorder,
269 Dependencies* dependencies,
270 CodeBuffer *code_buffer,
271 int frame_size,
272 OopMapSet* oop_maps,
273 ExceptionHandlerTable* handler_table,
274 ImplicitExceptionTable* nul_chk_table,
275 AbstractCompiler* compiler,
276 int comp_level);
277
278 // helper methods
279 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
280
281 const char* reloc_string_for(u_char* begin, u_char* end);
282 // Returns true if this thread changed the state of the nmethod or
283 // false if another thread performed the transition.
284 bool make_not_entrant_or_zombie(unsigned int state);
285 void inc_decompile_count();
286
287 // Used to manipulate the exception cache
288 void add_exception_cache_entry(ExceptionCache* new_entry);
289 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
290
291 // Inform external interfaces that a compiled method has been unloaded
292 void post_compiled_method_unload();
293
294 // Initialize fields to their default values
295 void init_defaults();
296
297 public:
298 // create nmethod with entry_bci
299 static nmethod* new_nmethod(methodHandle method,
300 int compile_id,
301 int entry_bci,
302 CodeOffsets* offsets,
303 int orig_pc_offset,
304 DebugInformationRecorder* recorder,
305 Dependencies* dependencies,
306 CodeBuffer *code_buffer,
307 int frame_size,
308 OopMapSet* oop_maps,
309 ExceptionHandlerTable* handler_table,
310 ImplicitExceptionTable* nul_chk_table,
311 AbstractCompiler* compiler,
312 int comp_level);
313
314 static nmethod* new_native_nmethod(methodHandle method,
315 int compile_id,
316 CodeBuffer *code_buffer,
317 int vep_offset,
318 int frame_complete,
319 int frame_size,
320 ByteSize receiver_sp_offset,
321 ByteSize basic_lock_sp_offset,
322 OopMapSet* oop_maps);
323
324 // accessors
325 Method* method() const { return _method; }
326 AbstractCompiler* compiler() const { return _compiler; }
327
328 // type info
329 bool is_nmethod() const { return true; }
330 bool is_java_method() const { return !method()->is_native(); }
331 bool is_native_method() const { return method()->is_native(); }
332 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
333
334 bool is_compiled_by_c1() const;
335 bool is_compiled_by_c2() const;
336 bool is_compiled_by_shark() const;
337
338 // boundaries for different parts
339 address consts_begin () const { return header_begin() + _consts_offset ; }
340 address consts_end () const { return header_begin() + code_offset() ; }
341 address insts_begin () const { return header_begin() + code_offset() ; }
342 address insts_end () const { return header_begin() + _stub_offset ; }
343 address stub_begin () const { return header_begin() + _stub_offset ; }
344 address stub_end () const { return header_begin() + _oops_offset ; }
345 address exception_begin () const { return header_begin() + _exception_offset ; }
346 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
347 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
348 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
349 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
350 oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
351
352 Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
353 Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
354
565 // unlink and deallocate this nmethod
566 // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
567 // expected to use any other private methods/data in this class.
568
569 protected:
570 void flush();
571
572 public:
573 // When true is returned, it is unsafe to remove this nmethod even if
574 // it is a zombie, since the VM or the ServiceThread might still be
575 // using it.
576 bool is_locked_by_vm() const { return _lock_count >0; }
577
578 // See comment at definition of _last_seen_on_stack
579 void mark_as_seen_on_stack();
580 bool can_convert_to_zombie();
581
582 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
583 void set_method(Method* method) { _method = method; }
584
585 // GC support
586 void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
587 // The parallel versions are used by G1.
588 bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
589 void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
590
591 private:
592 // Unload an nmethod if the *root object is dead.
593 bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
594 bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
595
596 public:
597 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
598 OopClosure* f);
599 void oops_do(OopClosure* f) { oops_do(f, false); }
600 void oops_do(OopClosure* f, bool allow_zombie);
601 bool detect_scavenge_root_oops();
602 void verify_scavenge_root_oops() PRODUCT_RETURN;
603
604 bool test_set_oops_do_mark();
622 if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
623 return desc;
624 }
625 return find_pc_desc_internal(pc, approximate);
626 }
627
628 public:
629 // ScopeDesc retrieval operation
630 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
631 // pc_desc_near returns the first PcDesc at or after the given pc.
632 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
633
634 public:
635 // copying of debugging information
636 void copy_scopes_pcs(PcDesc* pcs, int count);
637 void copy_scopes_data(address buffer, int size);
638
639 // Deopt
640 // Returns true if the PC is one we would expect if the frame is being deopted.
641 bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
642 bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); }
643 bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
644 // Accessor/mutator for the original pc of a frame before a frame was deopted.
645 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
646 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
647
648 static address get_deopt_original_pc(const frame* fr);
649
650 // MethodHandle
651 bool is_method_handle_return(address return_pc);
652
653 // jvmti support:
654 void post_compiled_method_load_event();
655 jmethodID get_and_cache_jmethod_id();
656
657 // verify operations
658 void verify();
659 void verify_scopes();
660 void verify_interrupt_point(address interrupt_point);
661
662 // printing support
|
109 class ExceptionHandlerTable;
110 class ImplicitExceptionTable;
111 class AbstractCompiler;
112 class xmlStream;
113
114 class nmethod : public CodeBlob {
115 friend class VMStructs;
116 friend class NMethodSweeper;
117 friend class CodeCache; // scavengable oops
118 private:
119
120 // GC support to help figure out if an nmethod has been
121 // cleaned/unloaded by the current GC.
122 static unsigned char _global_unloading_clock;
123
124 // Shared fields for all nmethods
125 Method* _method;
126 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
127 jmethodID _jmethod_id; // Cache of method()->jmethod_id()
128
129 #if INCLUDE_JVMCI
130 // Needed to keep nmethods alive that are not the default nmethod for the associated Method.
131 oop _jvmci_installed_code;
132 oop _speculation_log;
133 #endif
134
135 // To support simple linked-list chaining of nmethods:
136 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
137
138 union {
139 // Used by G1 to chain nmethods.
140 nmethod* _unloading_next;
141 // Used by non-G1 GCs to chain nmethods.
142 nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
143 };
144
145 static nmethod* volatile _oops_do_mark_nmethods;
146 nmethod* volatile _oops_do_mark_link;
147
148 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
149
150 // offsets for entry points
151 address _entry_point; // entry point with class check
152 address _verified_entry_point; // entry point without class check
153 address _osr_entry_point; // entry point for on stack replacement
154
262 int frame_size,
263 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
264 ByteSize basic_lock_sp_offset, /* synchronized natives only */
265 OopMapSet* oop_maps);
266
267 // Creation support
268 nmethod(Method* method,
269 int nmethod_size,
270 int compile_id,
271 int entry_bci,
272 CodeOffsets* offsets,
273 int orig_pc_offset,
274 DebugInformationRecorder *recorder,
275 Dependencies* dependencies,
276 CodeBuffer *code_buffer,
277 int frame_size,
278 OopMapSet* oop_maps,
279 ExceptionHandlerTable* handler_table,
280 ImplicitExceptionTable* nul_chk_table,
281 AbstractCompiler* compiler,
282 int comp_level
283 #if INCLUDE_JVMCI
284 , Handle installed_code,
285 Handle speculation_log
286 #endif
287 );
288
289 // helper methods
290 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
291
292 const char* reloc_string_for(u_char* begin, u_char* end);
293 // Returns true if this thread changed the state of the nmethod or
294 // false if another thread performed the transition.
295 bool make_not_entrant_or_zombie(unsigned int state);
296 void inc_decompile_count();
297
298 // Used to manipulate the exception cache
299 void add_exception_cache_entry(ExceptionCache* new_entry);
300 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
301
302 // Inform external interfaces that a compiled method has been unloaded
303 void post_compiled_method_unload();
304
305 // Initialize fields to their default values
306 void init_defaults();
307
308 public:
309 // create nmethod with entry_bci
310 static nmethod* new_nmethod(methodHandle method,
311 int compile_id,
312 int entry_bci,
313 CodeOffsets* offsets,
314 int orig_pc_offset,
315 DebugInformationRecorder* recorder,
316 Dependencies* dependencies,
317 CodeBuffer *code_buffer,
318 int frame_size,
319 OopMapSet* oop_maps,
320 ExceptionHandlerTable* handler_table,
321 ImplicitExceptionTable* nul_chk_table,
322 AbstractCompiler* compiler,
323 int comp_level
324 #if INCLUDE_JVMCI
325 , Handle installed_code = Handle(),
326 Handle speculation_log = Handle()
327 #endif
328 );
329
330 static nmethod* new_native_nmethod(methodHandle method,
331 int compile_id,
332 CodeBuffer *code_buffer,
333 int vep_offset,
334 int frame_complete,
335 int frame_size,
336 ByteSize receiver_sp_offset,
337 ByteSize basic_lock_sp_offset,
338 OopMapSet* oop_maps);
339
340 // accessors
341 Method* method() const { return _method; }
342 AbstractCompiler* compiler() const { return _compiler; }
343
344 // type info
345 bool is_nmethod() const { return true; }
346 bool is_java_method() const { return !method()->is_native(); }
347 bool is_native_method() const { return method()->is_native(); }
348 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
349
350 bool is_compiled_by_c1() const;
351 bool is_compiled_by_jvmci() const;
352 bool is_compiled_by_c2() const;
353 bool is_compiled_by_shark() const;
354
355 // boundaries for different parts
356 address consts_begin () const { return header_begin() + _consts_offset ; }
357 address consts_end () const { return header_begin() + code_offset() ; }
358 address insts_begin () const { return header_begin() + code_offset() ; }
359 address insts_end () const { return header_begin() + _stub_offset ; }
360 address stub_begin () const { return header_begin() + _stub_offset ; }
361 address stub_end () const { return header_begin() + _oops_offset ; }
362 address exception_begin () const { return header_begin() + _exception_offset ; }
363 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
364 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
365 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
366 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
367 oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
368
369 Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
370 Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
371
582 // unlink and deallocate this nmethod
583 // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
584 // expected to use any other private methods/data in this class.
585
586 protected:
587 void flush();
588
589 public:
590 // When true is returned, it is unsafe to remove this nmethod even if
591 // it is a zombie, since the VM or the ServiceThread might still be
592 // using it.
593 bool is_locked_by_vm() const { return _lock_count >0; }
594
595 // See comment at definition of _last_seen_on_stack
596 void mark_as_seen_on_stack();
597 bool can_convert_to_zombie();
598
599 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
600 void set_method(Method* method) { _method = method; }
601
602 #if INCLUDE_JVMCI
603 oop jvmci_installed_code() { return _jvmci_installed_code ; }
604 char* jvmci_installed_code_name(char* buf, size_t buflen);
605 void set_jvmci_installed_code(oop installed_code) { _jvmci_installed_code = installed_code; }
606 oop speculation_log() { return _speculation_log ; }
607 void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; }
608 #endif
609
610 // GC support
611 void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
612 // The parallel versions are used by G1.
613 bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
614 void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
615
616 private:
617 // Unload an nmethod if the *root object is dead.
618 bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
619 bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
620
621 public:
622 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
623 OopClosure* f);
624 void oops_do(OopClosure* f) { oops_do(f, false); }
625 void oops_do(OopClosure* f, bool allow_zombie);
626 bool detect_scavenge_root_oops();
627 void verify_scavenge_root_oops() PRODUCT_RETURN;
628
629 bool test_set_oops_do_mark();
647 if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
648 return desc;
649 }
650 return find_pc_desc_internal(pc, approximate);
651 }
652
653 public:
654 // ScopeDesc retrieval operation
655 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
656 // pc_desc_near returns the first PcDesc at or after the given pc.
657 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
658
659 public:
660 // copying of debugging information
661 void copy_scopes_pcs(PcDesc* pcs, int count);
662 void copy_scopes_data(address buffer, int size);
663
664 // Deopt
665 // Returns true if the PC is one we would expect if the frame is being deopted.
666 bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
667 bool is_deopt_entry (address pc);
668 bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
669 // Accessor/mutator for the original pc of a frame before a frame was deopted.
670 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
671 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
672
673 static address get_deopt_original_pc(const frame* fr);
674
675 // MethodHandle
676 bool is_method_handle_return(address return_pc);
677
678 // jvmti support:
679 void post_compiled_method_load_event();
680 jmethodID get_and_cache_jmethod_id();
681
682 // verify operations
683 void verify();
684 void verify_scopes();
685 void verify_interrupt_point(address interrupt_point);
686
687 // printing support
|