102 int _speculations_offset;
103 int _jvmci_data_offset;
104 #endif
105 int _nmethod_end_offset;
106
107 int code_offset() const { return (address) code_begin() - header_begin(); } // byte offset of the code section from the start of this nmethod's header
108
110 // Location in the frame (offset from sp) where deopt can store the
111 // original pc during a deopt.
111 int _orig_pc_offset;
112
113 int _compile_id; // which compilation made this nmethod
114 int _comp_level; // compilation level
115
116 // protected by CodeCache_lock
117 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
118
119 // used by jvmti to track if an unload event has been posted for this nmethod.
120 bool _unload_reported;
121
122 // Protected by Patching_lock
123 volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
124
125 #ifdef ASSERT
126 bool _oops_are_stale; // indicates that it's no longer safe to access oops section
127 #endif
128
129 #if INCLUDE_RTM_OPT
130 // RTM state at compile time. Used during deoptimization to decide
131 // whether to restart collecting RTM locking abort statistic again.
132 RTMState _rtm_state;
133 #endif
134
135 // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
136 // and is not made into a zombie. However, once the nmethod is made into
137 // a zombie, it will be locked one final time if CompiledMethodUnload
138 // event processing needs to be done.
139 volatile jint _lock_count;
140
141 // not_entrant method removal. Each mark_sweep pass will update
142 // this mark to current sweep invocation count if it is seen on the
373 bool unload_reported() { return _unload_reported; } // whether a jvmti unload event has been posted for this nmethod
374 void set_unload_reported() { _unload_reported = true; } // record that the jvmti unload event has been posted (never cleared here)
375
376 int get_state() const { // lifecycle state: {not_installed, in_use, not_entrant, zombie, unloaded}
377 return _state;
378 }
379
380 void make_unloaded();
381
382 bool has_dependencies() { return dependencies_size() != 0; } // true if this nmethod recorded any dependencies
383 void print_dependencies() PRODUCT_RETURN;
384 void flush_dependencies(bool delete_immediately);
385 bool has_flushed_dependencies() { return _has_flushed_dependencies; } // whether dependencies were already flushed (field protected by CodeCache_lock)
386 // Marks this nmethod's dependencies as flushed; asserted to happen at most once.
387 void set_has_flushed_dependencies() {
387 assert(!has_flushed_dependencies(), "should only happen once");
388 _has_flushed_dependencies = true; // bool field: use 'true' (consistent with set_unload_reported), not int 1
389 }
390
391 int comp_level() const { return _comp_level; } // compilation level that produced this nmethod
392
393 void unlink_from_method(bool acquire_lock);
394
395 // Support for oops in scopes and relocs:
396 // Note: index 0 is reserved for null.
397 oop oop_at(int index) const;
398 oop oop_at_phantom(int index) const; // phantom reference
399 oop* oop_addr_at(int index) const { // for GC: address of the oop slot for a 1-biased index
400 // relocation indexes are biased by 1 (because 0 is reserved)
401 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
402 assert(!_oops_are_stale, "oops are stale"); // ASSERT-only staleness flag (see _oops_are_stale)
403 return &oops_begin()[index - 1];
404 }
405
406 // Support for meta data in scopes and relocs:
407 // Note: index 0 is reserved for null.
408 Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); } // index 0 encodes null
409 Metadata** metadata_addr_at(int index) const { // for GC: address of the metadata slot for a 1-biased index
410 // relocation indexes are biased by 1 (because 0 is reserved)
411 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
412 return &metadata_begin()[index - 1];
413 }
|
102 int _speculations_offset;
103 int _jvmci_data_offset;
104 #endif
105 int _nmethod_end_offset;
106
107 int code_offset() const { return (address) code_begin() - header_begin(); } // byte offset of the code section from the start of this nmethod's header
108
110 // Location in the frame (offset from sp) where deopt can store the
111 // original pc during a deopt.
111 int _orig_pc_offset;
112
113 int _compile_id; // which compilation made this nmethod
114 int _comp_level; // compilation level
115
116 // protected by CodeCache_lock
117 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
118
119 // used by jvmti to track if an unload event has been posted for this nmethod.
120 bool _unload_reported;
121
122 // Protected by CompiledMethod_lock
123 volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
124
125 #ifdef ASSERT
126 bool _oops_are_stale; // indicates that it's no longer safe to access oops section
127 #endif
128
129 #if INCLUDE_RTM_OPT
130 // RTM state at compile time. Used during deoptimization to decide
131 // whether to restart collecting RTM locking abort statistic again.
132 RTMState _rtm_state;
133 #endif
134
135 // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
136 // and is not made into a zombie. However, once the nmethod is made into
137 // a zombie, it will be locked one final time if CompiledMethodUnload
138 // event processing needs to be done.
139 volatile jint _lock_count;
140
141 // not_entrant method removal. Each mark_sweep pass will update
142 // this mark to current sweep invocation count if it is seen on the
373 bool unload_reported() { return _unload_reported; } // whether a jvmti unload event has been posted for this nmethod
374 void set_unload_reported() { _unload_reported = true; } // record that the jvmti unload event has been posted (never cleared here)
375
376 int get_state() const { // lifecycle state: {not_installed, in_use, not_entrant, zombie, unloaded}
377 return _state;
378 }
379
380 void make_unloaded();
381
382 bool has_dependencies() { return dependencies_size() != 0; } // true if this nmethod recorded any dependencies
383 void print_dependencies() PRODUCT_RETURN;
384 void flush_dependencies(bool delete_immediately);
385 bool has_flushed_dependencies() { return _has_flushed_dependencies; } // whether dependencies were already flushed (field protected by CodeCache_lock)
386 // Marks this nmethod's dependencies as flushed; asserted to happen at most once.
387 void set_has_flushed_dependencies() {
387 assert(!has_flushed_dependencies(), "should only happen once");
388 _has_flushed_dependencies = true; // bool field: use 'true' (consistent with set_unload_reported), not int 1
389 }
390
391 int comp_level() const { return _comp_level; } // compilation level that produced this nmethod
392
393 void unlink_from_method();
394
395 // Support for oops in scopes and relocs:
396 // Note: index 0 is reserved for null.
397 oop oop_at(int index) const;
398 oop oop_at_phantom(int index) const; // phantom reference
399 oop* oop_addr_at(int index) const { // for GC: address of the oop slot for a 1-biased index
400 // relocation indexes are biased by 1 (because 0 is reserved)
401 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
402 assert(!_oops_are_stale, "oops are stale"); // ASSERT-only staleness flag (see _oops_are_stale)
403 return &oops_begin()[index - 1];
404 }
405
406 // Support for meta data in scopes and relocs:
407 // Note: index 0 is reserved for null.
408 Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); } // index 0 encodes null
409 Metadata** metadata_addr_at(int index) const { // for GC: address of the metadata slot for a 1-biased index
410 // relocation indexes are biased by 1 (because 0 is reserved)
411 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index")
412 return &metadata_begin()[index - 1];
413 }
|