rev 6875 : 8056240: Investigate increased GC remark time after class unloading changes in CRM Fuse
Reviewed-by: mgerdin, coleenp, bdelsart
--- old/src/share/vm/code/nmethod.hpp
+++ new/src/share/vm/code/nmethod.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_CODE_NMETHOD_HPP
26 26 #define SHARE_VM_CODE_NMETHOD_HPP
27 27
28 28 #include "code/codeBlob.hpp"
29 29 #include "code/pcDesc.hpp"
30 30 #include "oops/metadata.hpp"
31 31
32 32 // This class is used internally by nmethods to cache
33 33 // exception/pc/handler information.
34 34
35 35 class ExceptionCache : public CHeapObj<mtCode> {
36 36 friend class VMStructs;
37 37 private:
38 38 enum { cache_size = 16 };
39 39 Klass* _exception_type;
40 40 address _pc[cache_size];
41 41 address _handler[cache_size];
42 42 int _count;
43 43 ExceptionCache* _next;
44 44
45 45 address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
46 46 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
47 47 address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
48 48 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
49 49 int count() { return _count; }
50 50 void increment_count() { _count++; }
51 51
52 52 public:
53 53
54 54 ExceptionCache(Handle exception, address pc, address handler);
55 55
56 56 Klass* exception_type() { return _exception_type; }
57 57 ExceptionCache* next() { return _next; }
58 58 void set_next(ExceptionCache *ec) { _next = ec; }
59 59
60 60 address match(Handle exception, address pc);
61 61 bool match_exception_with_space(Handle exception) ;
62 62 address test_address(address addr);
63 63 bool add_address_and_handler(address addr, address handler) ;
64 64 };
65 65
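// A sketch of the intended lookup flow, for orientation (illustrative
// only; the real probing code lives in nmethod.cpp):
//
//   ExceptionCache* ec = nm->exception_cache();
//   while (ec != NULL) {
//     if (ec->exception_type() == exception->klass()) {
//       address handler = ec->match(exception, pc);  // NULL on miss
//       if (handler != NULL) return handler;
//     }
//     ec = ec->next();
//   }
//   // Miss: compute the handler the slow way, then record it with
//   // add_address_and_handler() or a fresh ExceptionCache entry.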
66 66
67 67 // cache pc descs found in earlier inquiries
68 68 class PcDescCache VALUE_OBJ_CLASS_SPEC {
69 69 friend class VMStructs;
70 70 private:
71 71 enum { cache_size = 4 };
72 72 // The array elements MUST be volatile! Several threads may modify
73 73 // and read from the cache concurrently; without volatile,
74 74 // find_pc_desc_internal has returned wrong results, because a C++
75 75 // compiler (namely xlC12) may duplicate field accesses.
76 76 typedef PcDesc* PcDescPtr;
77 77 volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
78 78 public:
79 79 PcDescCache() { debug_only(_pc_descs[0] = NULL); }
80 80 void reset_to(PcDesc* initial_pc_desc);
81 81 PcDesc* find_pc_desc(int pc_offset, bool approximate);
82 82 void add_pc_desc(PcDesc* pc_desc);
83 83 PcDesc* last_pc_desc() { return _pc_descs[0]; }
84 84 };
85 85
86 86
87 87 // nmethods (native methods) are the compiled code versions of Java methods.
88 88 //
89 89 // An nmethod contains:
90 90 // - header (the nmethod structure)
91 91 // [Relocation]
92 92 // - relocation information
93 93 // - constant part (doubles, longs and floats used in nmethod)
94 94 // - oop table
95 95 // [Code]
96 96 // - code body
97 97 // - exception handler
98 98 // - stub code
99 99 // [Debugging information]
100 100 // - oop array
101 101 // - data array
102 102 // - pcs
103 103 // [Exception handler table]
104 104 // - handler entry point array
105 105 // [Implicit Null Pointer exception table]
106 106 // - implicit null table array
107 107
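// The parts are laid out back-to-back, so each section's end is the next
// section's begin; the boundary accessors below make this explicit.
// Hedged sketch using only the public accessors:
//
//   assert(nm->consts_end() == nm->insts_begin(), "sections are contiguous");
//   assert(nm->insts_end()  == nm->stub_begin(),  "sections are contiguous");
//   int code_bytes = nm->insts_size();  // == insts_end() - insts_begin()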
108 108 class Dependencies;
109 109 class ExceptionHandlerTable;
110 110 class ImplicitExceptionTable;
111 111 class AbstractCompiler;
112 112 class xmlStream;
113 113
114 114 class nmethod : public CodeBlob {
115 115 friend class VMStructs;
116 116 friend class NMethodSweeper;
117 117 friend class CodeCache; // scavengable oops
118 118 private:
119 119
120 120 // GC support to help figure out if an nmethod has been
121 121 // cleaned/unloaded by the current GC.
122 122 static unsigned char _global_unloading_clock;
123 123
124 124 // Shared fields for all nmethods
125 125 Method* _method;
126 126 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
127 127 jmethodID _jmethod_id; // Cache of method()->jmethod_id()
128 128
129 129 // To support simple linked-list chaining of nmethods:
130 130 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
131 131
132 132 union {
133 133 // Used by G1 to chain nmethods.
134 134 nmethod* _unloading_next;
135 135 // Used by non-G1 GCs to chain nmethods.
136 136 nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
137 137 };
138 138
139 139 static nmethod* volatile _oops_do_mark_nmethods;
140 140 nmethod* volatile _oops_do_mark_link;
141 141
142 142 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
143 143
144 144 // offsets for entry points
145 145 address _entry_point; // entry point with class check
146 146 address _verified_entry_point; // entry point without class check
147 147 address _osr_entry_point; // entry point for on stack replacement
148 148
149 149 // Offsets for different nmethod parts
150 150 int _exception_offset;
151 151 // All deoptees will resume execution at the location described by
152 152 // this offset.
153 153 int _deoptimize_offset;
154 154 // All deoptees at a MethodHandle call site will resume execution
155 155 // at the location described by this offset.
156 156 int _deoptimize_mh_offset;
157 157 // Offset of the unwind handler if it exists
158 158 int _unwind_handler_offset;
159 159
160 160 #ifdef HAVE_DTRACE_H
161 161 int _trap_offset;
162 162 #endif // def HAVE_DTRACE_H
163 163 int _consts_offset;
164 164 int _stub_offset;
165 165 int _oops_offset; // offset to where embedded oop table begins (inside data)
166 166 int _metadata_offset; // embedded meta data table
167 167 int _scopes_data_offset;
168 168 int _scopes_pcs_offset;
169 169 int _dependencies_offset;
170 170 int _handler_table_offset;
171 171 int _nul_chk_table_offset;
172 172 int _nmethod_end_offset;
173 173
174 174 // Location in the frame (offset from sp) where deopt can store the
175 175 // original pc during a deopt.
176 176 int _orig_pc_offset;
177 177
178 178 int _compile_id; // which compilation made this nmethod
179 179 int _comp_level; // compilation level
180 180
181 181 // protected by CodeCache_lock
182 182 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
183 183
184 184 bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
185 185 bool _marked_for_deoptimization; // Used for stack deoptimization
186 186
187 187 // used by jvmti to track if an unload event has been posted for this nmethod.
188 188 bool _unload_reported;
189 189
190 190 // set during construction
191 191 unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
192 192 unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
193 193 unsigned int _lazy_critical_native:1; // Lazy JNI critical native
194 194 unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
195 195
196 196 // Protected by Patching_lock
197 197 volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
198 198
199 199 volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
200 200
201 201 #ifdef ASSERT
202 202 bool _oops_are_stale; // indicates that it's no longer safe to access oops section
203 203 #endif
204 204
205 205 enum { in_use = 0, // executable nmethod
206 206 not_entrant = 1, // marked for deoptimization but activations may still exist,
207 207 // will be transformed to zombie when all activations are gone
208 208 zombie = 2, // no activations exist, nmethod is ready for purge
209 209 unloaded = 3 }; // there should be no activations, should not be called,
210 210 // will be transformed to zombie immediately
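// The resulting state machine, sketched (transitions are performed by
// deoptimization, the sweeper and class unloading, per the notes above):
//
//   in_use --deopt--> not_entrant --no activations--> zombie --> flushed
//   in_use/not_entrant --classes unloaded--> unloaded --> zombie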
211 211
212 212 jbyte _scavenge_root_state;
213 213
214 214 #if INCLUDE_RTM_OPT
215 215 // RTM state at compile time. Used during deoptimization to decide
216 216 // whether to restart collecting RTM locking abort statistic again.
217 217 RTMState _rtm_state;
218 218 #endif
219 219
220 220 // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
221 221 // and is not made into a zombie. However, once the nmethod is made into
222 222 // a zombie, it will be locked one final time if CompiledMethodUnload
223 223 // event processing needs to be done.
224 224 jint _lock_count;
225 225
226 226 // not_entrant method removal. Each mark_sweep pass will update
227 227 // this mark to the current sweep invocation count if the method is
228 228 // seen on the stack. A not_entrant method can be removed when there
229 229 // are no more activations, i.e., when _stack_traversal_mark is less
230 230 // than the current sweep traversal index.
231 231 long _stack_traversal_mark;
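// Sketch of the removal check this enables (simplified; current_traversal
// stands in for the sweeper's traversal counter):
//
//   if (nm->is_not_entrant() &&
//       nm->stack_traversal_mark() < current_traversal) {
//     // not seen in the last full stack scan: no activations remain,
//     // so the nmethod may be transformed to a zombie
//   }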
232 232
233 233 // The _hotness_counter indicates the hotness of a method. The higher
234 234 // the value the hotter the method. The hotness counter of a nmethod is
235 235 // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
236 236 // is active while stack scanning (mark_active_nmethods()). The hotness
237 237 // counter is decreased (by 1) while sweeping.
238 238 int _hotness_counter;
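// Worked example (numbers hypothetical): with ReservedCodeCacheSize = 48M,
// a method seen during stack scanning gets (48M / (1024 * 1024)) * 2 = 96,
// then loses 1 per sweep, so it must go 96 sweeps without appearing on any
// stack before the counter reaches zero.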
239 239
240 240 ExceptionCache *_exception_cache;
241 241 PcDescCache _pc_desc_cache;
242 242
243 243 // These are used for compiled synchronized native methods to
244 244 // locate the owner and stack slot for the BasicLock so that we can
245 245 // properly revoke the bias of the owner if necessary. They are
246 246 // needed because there is no debug information for compiled native
247 247 // wrappers and the oop maps are insufficient to allow
248 248 // frame::retrieve_receiver() to work. Currently they are expected
249 249 // to be byte offsets from the Java stack pointer for maximum code
250 250 // sharing between platforms. Note that currently biased locking
251 251 // will never cause Class instances to be biased but this code
252 252 // handles the static synchronized case as well.
253 253 // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
254 254 // for non-static native wrapper frames.
255 255 ByteSize _native_receiver_sp_offset;
256 256 ByteSize _native_basic_lock_sp_offset;
257 257
258 258 friend class nmethodLocker;
259 259
260 260 // For native wrappers
261 261 nmethod(Method* method,
262 262 int nmethod_size,
263 263 int compile_id,
264 264 CodeOffsets* offsets,
265 265 CodeBuffer *code_buffer,
266 266 int frame_size,
267 267 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
268 268 ByteSize basic_lock_sp_offset, /* synchronized natives only */
269 269 OopMapSet* oop_maps);
270 270
271 271 #ifdef HAVE_DTRACE_H
272 272 // For native wrappers
273 273 nmethod(Method* method,
274 274 int nmethod_size,
275 275 CodeOffsets* offsets,
276 276 CodeBuffer *code_buffer,
277 277 int frame_size);
278 278 #endif // def HAVE_DTRACE_H
279 279
280 280 // Creation support
281 281 nmethod(Method* method,
282 282 int nmethod_size,
283 283 int compile_id,
284 284 int entry_bci,
285 285 CodeOffsets* offsets,
286 286 int orig_pc_offset,
287 287 DebugInformationRecorder *recorder,
288 288 Dependencies* dependencies,
289 289 CodeBuffer *code_buffer,
290 290 int frame_size,
291 291 OopMapSet* oop_maps,
292 292 ExceptionHandlerTable* handler_table,
293 293 ImplicitExceptionTable* nul_chk_table,
294 294 AbstractCompiler* compiler,
295 295 int comp_level);
296 296
297 297 // helper methods
298 298 void* operator new(size_t size, int nmethod_size) throw();
299 299
300 300 const char* reloc_string_for(u_char* begin, u_char* end);
301 301 // Returns true if this thread changed the state of the nmethod or
302 302 // false if another thread performed the transition.
303 303 bool make_not_entrant_or_zombie(unsigned int state);
304 304 void inc_decompile_count();
305 305
306 306 // Used to manipulate the exception cache
307 307 void add_exception_cache_entry(ExceptionCache* new_entry);
308 308 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
309 309
310 310 // Inform external interfaces that a compiled method has been unloaded
311 311 void post_compiled_method_unload();
312 312
313 313 // Initialize fields to their default values
314 314 void init_defaults();
315 315
316 316 public:
317 317 // create nmethod with entry_bci
318 318 static nmethod* new_nmethod(methodHandle method,
319 319 int compile_id,
320 320 int entry_bci,
321 321 CodeOffsets* offsets,
322 322 int orig_pc_offset,
323 323 DebugInformationRecorder* recorder,
324 324 Dependencies* dependencies,
325 325 CodeBuffer *code_buffer,
326 326 int frame_size,
327 327 OopMapSet* oop_maps,
328 328 ExceptionHandlerTable* handler_table,
329 329 ImplicitExceptionTable* nul_chk_table,
330 330 AbstractCompiler* compiler,
331 331 int comp_level);
332 332
333 333 static nmethod* new_native_nmethod(methodHandle method,
334 334 int compile_id,
335 335 CodeBuffer *code_buffer,
336 336 int vep_offset,
337 337 int frame_complete,
338 338 int frame_size,
339 339 ByteSize receiver_sp_offset,
340 340 ByteSize basic_lock_sp_offset,
341 341 OopMapSet* oop_maps);
342 342
343 343 #ifdef HAVE_DTRACE_H
344 344 // The method we generate for a dtrace probe has to look
345 345 // like an nmethod as far as the rest of the system is concerned,
346 346 // which is somewhat unfortunate.
347 347 static nmethod* new_dtrace_nmethod(methodHandle method,
348 348 CodeBuffer *code_buffer,
349 349 int vep_offset,
350 350 int trap_offset,
351 351 int frame_complete,
352 352 int frame_size);
353 353
354 354 int trap_offset() const { return _trap_offset; }
355 355 address trap_address() const { return insts_begin() + _trap_offset; }
356 356
357 357 #endif // def HAVE_DTRACE_H
358 358
359 359 // accessors
360 360 Method* method() const { return _method; }
361 361 AbstractCompiler* compiler() const { return _compiler; }
362 362
363 363 // type info
364 364 bool is_nmethod() const { return true; }
365 365 bool is_java_method() const { return !method()->is_native(); }
366 366 bool is_native_method() const { return method()->is_native(); }
367 367 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
368 368
369 369 bool is_compiled_by_c1() const;
370 370 bool is_compiled_by_c2() const;
371 371 bool is_compiled_by_shark() const;
372 372
373 373 // boundaries for different parts
374 374 address consts_begin () const { return header_begin() + _consts_offset ; }
375 375 address consts_end () const { return header_begin() + code_offset() ; }
376 376 address insts_begin () const { return header_begin() + code_offset() ; }
377 377 address insts_end () const { return header_begin() + _stub_offset ; }
378 378 address stub_begin () const { return header_begin() + _stub_offset ; }
379 379 address stub_end () const { return header_begin() + _oops_offset ; }
380 380 address exception_begin () const { return header_begin() + _exception_offset ; }
381 381 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
382 382 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
383 383 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
384 384 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
385 385 oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
386 386
387 387 Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
388 388 Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
389 389
390 390 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
391 391 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
392 392 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
393 393 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
394 394 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
395 395 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
396 396 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
397 397 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
398 398 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
399 399 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
400 400
401 401 // Sizes
402 402 int consts_size () const { return consts_end () - consts_begin (); }
403 403 int insts_size () const { return insts_end () - insts_begin (); }
404 404 int stub_size () const { return stub_end () - stub_begin (); }
405 405 int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
406 406 int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); }
407 407 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
408 408 int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
409 409 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
410 410 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
411 411 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
412 412
413 413 int total_size () const;
414 414
415 415 void dec_hotness_counter() { _hotness_counter--; }
416 416 void set_hotness_counter(int val) { _hotness_counter = val; }
417 417 int hotness_counter() const { return _hotness_counter; }
418 418
419 419 // Containment
420 420 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
421 421 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
422 422 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
423 423 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
424 424 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
425 425 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
426 426 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
427 427 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
428 428 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
429 429
430 430 // entry points
431 431 address entry_point() const { return _entry_point; } // normal entry point
432 432 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
433 433
434 434 // flag accessing and manipulation
435 435 bool is_in_use() const { return _state == in_use; }
436 436 bool is_alive() const { return _state == in_use || _state == not_entrant; }
437 437 bool is_not_entrant() const { return _state == not_entrant; }
438 438 bool is_zombie() const { return _state == zombie; }
439 439 bool is_unloaded() const { return _state == unloaded; }
440 440
441 441 #if INCLUDE_RTM_OPT
442 442 // rtm state accessing and manipulating
443 443 RTMState rtm_state() const { return _rtm_state; }
444 444 void set_rtm_state(RTMState state) { _rtm_state = state; }
445 445 #endif
446 446
447 447 // Make the nmethod non entrant. The nmethod will continue to be
448 448 // alive. It is used when an uncommon trap happens. Returns true
449 449 // if this thread changed the state of the nmethod or false if
450 450 // another thread performed the transition.
451 451 bool make_not_entrant() {
452 452 assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
453 453 return make_not_entrant_or_zombie(not_entrant);
454 454 }
455 455 bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
456 456
457 457 // used by jvmti to track if the unload event has been reported
458 458 bool unload_reported() { return _unload_reported; }
459 459 void set_unload_reported() { _unload_reported = true; }
460 460
461 461 void set_unloading_next(nmethod* next) { _unloading_next = next; }
462 462 nmethod* unloading_next() { return _unloading_next; }
463 463
464 464 static unsigned char global_unloading_clock() { return _global_unloading_clock; }
465 465 static void increase_unloading_clock();
466 466
467 467 void set_unloading_clock(unsigned char unloading_clock);
468 468 unsigned char unloading_clock();
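// Sketch of the assumed protocol: each unloading GC cycle first calls
// increase_unloading_clock(); an nmethod whose own clock already matches
// the global clock has been cleaned/unloaded during the current cycle:
//
//   bool cleaned_this_cycle =
//       nm->unloading_clock() == nmethod::global_unloading_clock();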
469 469
470 470 bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
471 471 void mark_for_deoptimization() { _marked_for_deoptimization = true; }
472 472
473 473 void make_unloaded(BoolObjectClosure* is_alive, oop cause);
474 474
475 475 bool has_dependencies() { return dependencies_size() != 0; }
476 476 void flush_dependencies(BoolObjectClosure* is_alive);
477 477 bool has_flushed_dependencies() { return _has_flushed_dependencies; }
478 478 void set_has_flushed_dependencies() {
479 479 assert(!has_flushed_dependencies(), "should only happen once");
480 480 _has_flushed_dependencies = 1;
481 481 }
482 482
483 483 bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
484 484 void mark_for_reclamation() { _marked_for_reclamation = 1; }
485 485
486 486 bool has_unsafe_access() const { return _has_unsafe_access; }
487 487 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
488 488
489 489 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
490 490 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
491 491
492 492 bool is_lazy_critical_native() const { return _lazy_critical_native; }
493 493 void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
494 494
495 495 bool has_wide_vectors() const { return _has_wide_vectors; }
496 496 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
497 497
498 498 int comp_level() const { return _comp_level; }
499 499
500 500 // Support for oops in scopes and relocs:
501 501 // Note: index 0 is reserved for null.
502 502 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
503 503 oop* oop_addr_at(int index) const { // for GC
504 504 // relocation indexes are biased by 1 (because 0 is reserved)
505 505 assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
506 506 assert(!_oops_are_stale, "oops are stale");
507 507 return &oops_begin()[index - 1];
508 508 }
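// Usage sketch of the 1-based indexing described above:
//
//   oop first = nm->oop_at(1);  // == oops_begin()[0]
//   oop none  = nm->oop_at(0);  // always NULL (index 0 is reserved)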
509 509
510 510 // Support for meta data in scopes and relocs:
511 511 // Note: index 0 is reserved for null.
512 512 Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); }
513 513 Metadata** metadata_addr_at(int index) const { // for GC
514 514 // relocation indexes are biased by 1 (because 0 is reserved)
515 515 assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
516 516 return &metadata_begin()[index - 1];
517 517 }
518 518
519 519 void copy_values(GrowableArray<jobject>* oops);
520 520 void copy_values(GrowableArray<Metadata*>* metadata);
521 521
522 522 // Relocation support
523 523 private:
524 524 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
525 525 inline void initialize_immediate_oop(oop* dest, jobject handle);
526 526
527 527 public:
528 528 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
529 529 void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
530 530 void verify_oop_relocations();
531 531
532 532 bool is_at_poll_return(address pc);
533 533 bool is_at_poll_or_poll_return(address pc);
534 534
535 535 // Scavengable oop support
536 536 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
537 537 protected:
538 538 enum { sl_on_list = 0x01, sl_marked = 0x10 };
539 539 void set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; }
540 540 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
541 541 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
542 542 #ifndef PRODUCT
543 543 void set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; }
544 544 void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
545 545 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; }
546 546 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
547 547 #endif //PRODUCT
548 548 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
549 549 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
550 550
551 551 public:
552 552
553 553 // Sweeper support
554 554 long stack_traversal_mark() { return _stack_traversal_mark; }
555 555 void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
556 556
557 557 // Exception cache support
558 558 ExceptionCache* exception_cache() const { return _exception_cache; }
559 559 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
560 560 address handler_for_exception_and_pc(Handle exception, address pc);
561 561 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
562 562 void clean_exception_cache(BoolObjectClosure* is_alive);
563 563
564 564 // implicit exceptions support
565 565 address continuation_for_implicit_exception(address pc);
566 566
567 567 // On-stack replacement support
568 568 int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
569 569 address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
570 570 void invalidate_osr_method();
571 571 nmethod* osr_link() const { return _osr_link; }
572 572 void set_osr_link(nmethod *n) { _osr_link = n; }
573 573
574 574 // tells whether frames described by this nmethod can be deoptimized
575 575 // note: native wrappers cannot be deoptimized.
576 576 bool can_be_deoptimized() const { return is_java_method(); }
577 577
578 578 // Inline cache support
579 579 void clear_inline_caches();
580 580 void cleanup_inline_caches();
581 581 bool inlinecache_check_contains(address addr) const {
582 582 return (addr >= code_begin() && addr < verified_entry_point());
583 583 }
584 584
585 585 // Verify calls to dead methods have been cleaned.
586 586 void verify_clean_inline_caches();
587 587 // Verify and count cached icholder relocations.
588 588 int verify_icholder_relocations();
589 589 // Check that all metadata is still alive
590 590 void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
591 591
592 592 // Unlink and deallocate this nmethod.
593 593 // Only the NMethodSweeper class is expected to use this; NMethodSweeper is not
594 594 // expected to use any other private methods/data in this class.
595 595
596 596 protected:
597 597 void flush();
598 598
599 599 public:
600 600 // When true is returned, it is unsafe to remove this nmethod even if
601 601 // it is a zombie, since the VM or the ServiceThread might still be
602 602 // using it.
603 603 bool is_locked_by_vm() const { return _lock_count > 0; }
604 604
605 605 // See comment at definition of _stack_traversal_mark
606 606 void mark_as_seen_on_stack();
607 607 bool can_not_entrant_be_converted();
608 608
609 609 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
610 610 void set_method(Method* method) { _method = method; }
611 611
612 612 // GC support
613 613 void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
614 614 // The parallel versions are used by G1.
615 615 bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
616 616 void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
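// Sketch of the two-pass scheme these suggest (assumed from the names and
// the G1 comment above; the real driver lives in G1's remark code):
//
//   // Pass 1, run in parallel by the GC worker threads:
//   nm->do_unloading_parallel(is_alive, unloading_occurred);
//   // Pass 2, after all workers have finished pass 1, for work that
//   // could not be done concurrently:
//   nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);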
617 +
618 + private:
617 619 // Unload a nmethod if the *root object is dead.
618 620 bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
621 + bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
619 622
623 + void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
624 + void mark_metadata_on_stack_non_relocs();
625 +
626 + public:
620 627 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
621 628 OopClosure* f);
622 629 void oops_do(OopClosure* f) { oops_do(f, false); }
623 630 void oops_do(OopClosure* f, bool allow_zombie);
624 631 bool detect_scavenge_root_oops();
625 632 void verify_scavenge_root_oops() PRODUCT_RETURN;
626 633
627 634 bool test_set_oops_do_mark();
628 635 static void oops_do_marking_prologue();
629 636 static void oops_do_marking_epilogue();
630 637 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
631 638 bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
632 639
633 640 // ScopeDesc for an instruction
634 641 ScopeDesc* scope_desc_at(address pc);
635 642
636 643 private:
637 644 ScopeDesc* scope_desc_in(address begin, address end);
638 645
639 646 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
640 647
641 648 PcDesc* find_pc_desc_internal(address pc, bool approximate);
642 649
643 650 PcDesc* find_pc_desc(address pc, bool approximate) {
644 651 PcDesc* desc = _pc_desc_cache.last_pc_desc();
645 652 if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
646 653 return desc;
647 654 }
648 655 return find_pc_desc_internal(pc, approximate);
649 656 }
650 657
651 658 public:
652 659 // ScopeDesc retrieval operation
653 660 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
654 661 // pc_desc_near returns the first PcDesc at or after the given pc.
655 662 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
656 663
657 664 public:
658 665 // copying of debugging information
659 666 void copy_scopes_pcs(PcDesc* pcs, int count);
660 667 void copy_scopes_data(address buffer, int size);
661 668
662 669 // Deopt
663 670 // Returns true if the PC is one we would expect if the frame is being deopted.
664 671 bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
665 672 bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); }
666 673 bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
667 674 // Accessor/mutator for the original pc of a frame before a frame was deopted.
668 675 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
669 676 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
670 677
671 678 static address get_deopt_original_pc(const frame* fr);
672 679
673 680 // MethodHandle
674 681 bool is_method_handle_return(address return_pc);
675 682
676 683 // jvmti support:
677 684 void post_compiled_method_load_event();
678 685 jmethodID get_and_cache_jmethod_id();
679 686
680 687 // verify operations
681 688 void verify();
682 689 void verify_scopes();
683 690 void verify_interrupt_point(address interrupt_point);
684 691
685 692 // printing support
686 693 void print() const;
687 694 void print_code();
688 695 void print_relocations() PRODUCT_RETURN;
689 696 void print_pcs() PRODUCT_RETURN;
690 697 void print_scopes() PRODUCT_RETURN;
691 698 void print_dependencies() PRODUCT_RETURN;
692 699 void print_value_on(outputStream* st) const PRODUCT_RETURN;
693 700 void print_calls(outputStream* st) PRODUCT_RETURN;
694 701 void print_handler_table() PRODUCT_RETURN;
695 702 void print_nul_chk_table() PRODUCT_RETURN;
696 703 void print_nmethod(bool print_code);
697 704
698 705 // Need to re-define this from CodeBlob; otherwise the overload hides it.
699 706 virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
700 707 void print_on(outputStream* st, const char* msg) const;
701 708
702 709 // Logging
703 710 void log_identity(xmlStream* log) const;
704 711 void log_new_nmethod() const;
705 712 void log_state_change() const;
706 713
707 714 // Prints block-level comments, including nmethod specific block labels:
708 715 virtual void print_block_comment(outputStream* stream, address block_begin) const {
709 716 print_nmethod_labels(stream, block_begin);
710 717 CodeBlob::print_block_comment(stream, block_begin);
711 718 }
712 719 void print_nmethod_labels(outputStream* stream, address block_begin) const;
713 720
714 721 // Prints a comment for one native instruction (reloc info, pc desc)
715 722 void print_code_comment_on(outputStream* st, int column, address begin, address end);
716 723 static void print_statistics() PRODUCT_RETURN;
717 724
718 725 // Compiler task identification. Note that all OSR methods
719 726 // are numbered in an independent sequence if CICountOSR is true,
720 727 // and native method wrappers are also numbered independently if
721 728 // CICountNative is true.
722 729 int compile_id() const { return _compile_id; }
723 730 const char* compile_kind() const;
724 731
725 732 // For debugging
726 733 // CompiledIC* IC_at(char* p) const;
727 734 // PrimitiveIC* primitiveIC_at(char* p) const;
728 735 oop embeddedOop_at(address p);
729 736
730 737 // tells if any of this method's dependencies have been invalidated
731 738 // (this is expensive!)
732 739 bool check_all_dependencies();
733 740
734 741 // tells if this compiled method is dependent on the given changes,
735 742 // and the changes have invalidated it
736 743 bool check_dependency_on(DepChange& changes);
737 744
738 745 // Evolution support. Tells if this compiled method is dependent on any of
739 746 // methods m() of class dependee, such that if m() in dependee is replaced,
740 747 // this compiled method will have to be deoptimized.
741 748 bool is_evol_dependent_on(Klass* dependee);
742 749
743 750 // Fast breakpoint support. Tells if this compiled method is
744 751 // dependent on the given method. Returns true if this nmethod
745 752 // corresponds to the given method as well.
746 753 bool is_dependent_on_method(Method* dependee);
747 754
748 755 // is it ok to patch at address?
749 756 bool is_patchable_at(address instr_address);
750 757
751 758 // UseBiasedLocking support
752 759 ByteSize native_receiver_sp_offset() {
753 760 return _native_receiver_sp_offset;
754 761 }
755 762 ByteSize native_basic_lock_sp_offset() {
756 763 return _native_basic_lock_sp_offset;
757 764 }
758 765
759 766 // support for code generation
760 767 static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
761 768 static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
762 769 static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); }
763 770
764 771 // RedefineClasses support. Mark metadata in nmethods as on_stack so that
765 772 // redefine classes doesn't purge it.
766 773 static void mark_on_stack(nmethod* nm) {
767 774 nm->metadata_do(Metadata::mark_on_stack);
768 775 }
769 776 void metadata_do(void f(Metadata*));
770 777 };
771 778
772 779 // Locks an nmethod so its code will not get removed and it will not
773 780 // be made into a zombie, even if it is a not_entrant method. After the
774 781 // nmethod becomes a zombie, if CompiledMethodUnload event processing
775 782 // needs to be done, then lock_nmethod() is used directly to keep the
776 783 // generated code from being reused too early.
777 784 class nmethodLocker : public StackObj {
778 785 nmethod* _nm;
779 786
780 787 public:
781 788
782 789 // note: nm can be NULL
783 790 // Only JvmtiDeferredEvent::compiled_method_unload_event()
784 791 // should pass zombie_ok == true.
785 792 static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
786 793 static void unlock_nmethod(nmethod* nm); // (ditto)
787 794
788 795 nmethodLocker(address pc); // derive nm from pc
789 796 nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
790 797 nmethodLocker() { _nm = NULL; }
791 798 ~nmethodLocker() { unlock_nmethod(_nm); }
792 799
793 800 nmethod* code() { return _nm; }
794 801 void set_code(nmethod* new_nm) {
795 802 unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
796 803 _nm = new_nm;
797 804 lock_nmethod(_nm);
798 805 }
799 806 };
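// Typical RAII usage (sketch):
//
//   {
//     nmethodLocker nl(nm);  // lock_nmethod() bumps the nmethod's _lock_count
//     // ... nm will not be flushed or made into a zombie here ...
//   }                        // destructor calls unlock_nmethod()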
800 807
801 808 #endif // SHARE_VM_CODE_NMETHOD_HPP