1 /*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
// NOTE(review): the leading integer on each line below is a line number baked
// in by the extraction tool, not program text; the C++ declaration follows it.
// Field order must not change: it defines the nmethod object layout that the
// rest of the VM (and possibly generated code) depends on.

// Offsets locating the embedded sections of this nmethod -- presumably
// relative to the nmethod's start; the layout code is outside this view.
171 int _stub_offset;
172 int _oops_offset; // offset to where embedded oop table begins (inside data)
173 int _metadata_offset; // embedded meta data table
174 int _scopes_data_offset;
175 int _scopes_pcs_offset;
176 int _dependencies_offset;
177 int _handler_table_offset;
178 int _nul_chk_table_offset;
179 int _nmethod_end_offset;
180
181 // location in frame (offset for sp) that deopt can store the original
182 // pc during a deopt.
183 int _orig_pc_offset;
184
185 int _compile_id; // which compilation made this nmethod
186 int _comp_level; // compilation level
187
188 // protected by CodeCache_lock
189 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
190
191 bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
192
// Tri-state deoptimization mark: not marked, marked (recompile counters get
// updated on deopt), or marked with counter updates suppressed.
193 enum MarkForDeoptimizationStatus {
194 not_marked,
195 deoptimize,
196 deoptimize_noupdate };
197
198 MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
199
200 // used by jvmti to track if an unload event has been posted for this nmethod.
201 bool _unload_reported;
202
// One-bit flags packed into a single unsigned int; fixed at construction.
203 // set during construction
204 unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
205 unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
206 unsigned int _lazy_critical_native:1; // Lazy JNI critical native
207 unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
208
209 // Protected by Patching_lock
210 volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}
211
212 volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
485 bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
486 void mark_for_deoptimization(bool inc_recompile_counts = true) {
487 _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
488 }
489 bool update_recompile_counts() const {
490 // Update recompile counts when either the update is explicitly requested (deoptimize)
491 // or the nmethod is not marked for deoptimization at all (not_marked).
492 // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
493 return _mark_for_deoptimization_status != deoptimize_noupdate;
494 }
495
496 void make_unloaded(BoolObjectClosure* is_alive, oop cause);
497
498 bool has_dependencies() { return dependencies_size() != 0; }
499 void flush_dependencies(BoolObjectClosure* is_alive);
500 bool has_flushed_dependencies() { return _has_flushed_dependencies; }
501 void set_has_flushed_dependencies() {
502 assert(!has_flushed_dependencies(), "should only happen once");
503 _has_flushed_dependencies = 1;
504 }
505
506 bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
507 void mark_for_reclamation() { _marked_for_reclamation = 1; }
508
509 bool has_unsafe_access() const { return _has_unsafe_access; }
510 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
511
512 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
513 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
514
515 bool is_lazy_critical_native() const { return _lazy_critical_native; }
516 void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
517
518 bool has_wide_vectors() const { return _has_wide_vectors; }
519 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
520
521 int comp_level() const { return _comp_level; }
522
523 // Support for oops in scopes and relocs:
524 // Note: index 0 is reserved for null.
525 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
526 oop* oop_addr_at(int index) const { // for GC
527 // relocation indexes are biased by 1 (because 0 is reserved)
|
1 /*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
// NOTE(review): the leading integer on each line below is a line number baked
// in by the extraction tool, not program text; the C++ declaration follows it.
// Field order must not change: it defines the nmethod object layout that the
// rest of the VM (and possibly generated code) depends on.

// Offsets locating the embedded sections of this nmethod -- presumably
// relative to the nmethod's start; the layout code is outside this view.
171 int _stub_offset;
172 int _oops_offset; // offset to where embedded oop table begins (inside data)
173 int _metadata_offset; // embedded meta data table
174 int _scopes_data_offset;
175 int _scopes_pcs_offset;
176 int _dependencies_offset;
177 int _handler_table_offset;
178 int _nul_chk_table_offset;
179 int _nmethod_end_offset;
180
181 // location in frame (offset for sp) that deopt can store the original
182 // pc during a deopt.
183 int _orig_pc_offset;
184
185 int _compile_id; // which compilation made this nmethod
186 int _comp_level; // compilation level
187
188 // protected by CodeCache_lock
189 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
190
// Tri-state deoptimization mark: not marked, marked (recompile counters get
// updated on deopt), or marked with counter updates suppressed.
191 enum MarkForDeoptimizationStatus {
192 not_marked,
193 deoptimize,
194 deoptimize_noupdate };
195
196 MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
197
198 // used by jvmti to track if an unload event has been posted for this nmethod.
199 bool _unload_reported;
200
// One-bit flags packed into a single unsigned int; fixed at construction.
201 // set during construction
202 unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
203 unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
204 unsigned int _lazy_critical_native:1; // Lazy JNI critical native
205 unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
206
207 // Protected by Patching_lock
208 volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}
209
210 volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
483 bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
484 void mark_for_deoptimization(bool inc_recompile_counts = true) {
485 _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
486 }
487 bool update_recompile_counts() const {
488 // Update recompile counts when either the update is explicitly requested (deoptimize)
489 // or the nmethod is not marked for deoptimization at all (not_marked).
490 // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
491 return _mark_for_deoptimization_status != deoptimize_noupdate;
492 }
493
494 void make_unloaded(BoolObjectClosure* is_alive, oop cause);
495
496 bool has_dependencies() { return dependencies_size() != 0; }
497 void flush_dependencies(BoolObjectClosure* is_alive);
498 bool has_flushed_dependencies() { return _has_flushed_dependencies; }
499 void set_has_flushed_dependencies() {
500 assert(!has_flushed_dependencies(), "should only happen once");
501 _has_flushed_dependencies = 1;
502 }
503
504 bool has_unsafe_access() const { return _has_unsafe_access; }
505 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
506
507 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
508 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
509
510 bool is_lazy_critical_native() const { return _lazy_critical_native; }
511 void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
512
513 bool has_wide_vectors() const { return _has_wide_vectors; }
514 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
515
516 int comp_level() const { return _comp_level; }
517
518 // Support for oops in scopes and relocs:
519 // Note: index 0 is reserved for null.
520 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
521 oop* oop_addr_at(int index) const { // for GC
522 // relocation indexes are biased by 1 (because 0 is reserved)
|