rev 6867 : 8061817: Whitebox.deoptimizeMethod() does not deoptimize all OSR versions of method
Summary: Fixed Whitebox.deoptimizeMethod() to deoptimize all OSR versions of the method.
Reviewed-by: kvn, iignatyev
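For context, the user-visible effect of this change can be exercised through the WhiteBox test API. The sketch below is illustrative only and is not part of this webrev: it assumes the sun.hotspot.WhiteBox methods deoptimizeMethod(Executable) and isMethodCompiled(Executable, boolean isOsr), whose overloads and behavior may differ between JDK versions, and osrLoop() is a hypothetical OSR-compilation candidate. It would be run with -Xbootclasspath/a:<whitebox.jar> -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI.

import java.lang.reflect.Method;
import sun.hotspot.WhiteBox;

// Illustrative sketch only (not part of this changeset). osrLoop() is a
// hypothetical method whose hot loop should be OSR-compiled while it runs;
// compilation is not guaranteed in this simplified form.
public class DeoptimizeOsrSketch {
    private static final WhiteBox WB = WhiteBox.getWhiteBox();

    static long osrLoop() {
        long sum = 0;
        for (int i = 0; i < 1_000_000; i++) {   // long-running loop, OSR candidate
            sum += i;
        }
        return sum;
    }

    public static void main(String[] args) throws Exception {
        Method m = DeoptimizeOsrSketch.class.getDeclaredMethod("osrLoop");
        osrLoop();                               // let the loop get OSR-compiled
        WB.deoptimizeMethod(m);                  // with this fix, OSR versions are deoptimized as well
        if (WB.isMethodCompiled(m, true /* isOsr */)) {
            throw new AssertionError("an OSR nmethod survived deoptimizeMethod()");
        }
    }
}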
--- old/src/share/vm/oops/method.hpp
+++ new/src/share/vm/oops/method.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_OOPS_METHODOOP_HPP
26 26 #define SHARE_VM_OOPS_METHODOOP_HPP
27 27
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/compressedStream.hpp"
30 30 #include "compiler/oopMap.hpp"
31 31 #include "interpreter/invocationCounter.hpp"
32 32 #include "oops/annotations.hpp"
33 33 #include "oops/constantPool.hpp"
34 34 #include "oops/methodCounters.hpp"
35 35 #include "oops/instanceKlass.hpp"
36 36 #include "oops/oop.hpp"
37 37 #include "oops/typeArrayOop.hpp"
38 38 #include "utilities/accessFlags.hpp"
39 39 #include "utilities/growableArray.hpp"
40 40
41 41 // A Method* represents a Java method.
42 42 //
43 43 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
44 44 // so keeping the size of this structure small has a big impact on footprint.
45 45 //
46 46 // We put all oops and method_size first for better gc cache locality.
47 47 //
48 48 // The actual bytecodes are inlined after the end of the Method struct.
49 49 //
50 50 // There are bits in the access_flags telling whether inlined tables are present.
51 51 // Note that accessing the line number and local variable tables is not performance critical at all.
52 52 // Accessing the checked exceptions table is used by reflection, so we put that last to make access
53 53 // to it fast.
54 54 //
55 55 // The line number table is compressed and inlined following the byte codes. It is found as the first
56 56 // byte following the byte codes. The checked exceptions table and the local variable table are inlined
57 57 // after the line number table, and indexed from the end of the method. We do not compress the checked
58 58 // exceptions table since the average length is less than 2, and do not bother to compress the local
59 59 // variable table either since it is mostly absent.
60 60 //
61 61 // Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
62 62 //
63 63 // |------------------------------------------------------|
64 64 // | header |
65 65 // | klass |
66 66 // |------------------------------------------------------|
67 67 // | ConstMethod* (oop) |
68 68 // |------------------------------------------------------|
69 69 // | methodData (oop) |
70 70 // | methodCounters |
71 71 // |------------------------------------------------------|
72 72 // | access_flags |
73 73 // | vtable_index |
74 74 // |------------------------------------------------------|
75 75 // | result_index (C++ interpreter only) |
76 76 // |------------------------------------------------------|
77 77 // | method_size | intrinsic_id| flags |
78 78 // |------------------------------------------------------|
79 79 // | code (pointer) |
80 80 // | i2i (pointer) |
81 81 // | adapter (pointer) |
82 82 // | from_compiled_entry (pointer) |
83 83 // | from_interpreted_entry (pointer) |
84 84 // |------------------------------------------------------|
85 85 // | native_function (present only if native) |
86 86 // | signature_handler (present only if native) |
87 87 // |------------------------------------------------------|
88 88
89 89
90 90 class CheckedExceptionElement;
91 91 class LocalVariableTableElement;
92 92 class AdapterHandlerEntry;
93 93 class MethodData;
94 94 class MethodCounters;
95 95 class ConstMethod;
96 96 class InlineTableSizes;
97 97 class KlassSizeStats;
98 98
99 99 class Method : public Metadata {
100 100 friend class VMStructs;
101 101 private:
102 102 ConstMethod* _constMethod; // Method read-only data.
103 103 MethodData* _method_data;
104 104 MethodCounters* _method_counters;
105 105 AccessFlags _access_flags; // Access flags
106 106 int _vtable_index; // vtable index of this method (see VtableIndexFlag)
107 107 // note: can have vtables with >2**16 elements (because of inheritance)
108 108 #ifdef CC_INTERP
109 109 int _result_index; // C++ interpreter needs for converting results to/from stack
110 110 #endif
111 111 u2 _method_size; // size of this object
112 112 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
113 113 u1 _jfr_towrite : 1, // Flags
114 114 _caller_sensitive : 1,
115 115 _force_inline : 1,
116 116 _hidden : 1,
117 117 _dont_inline : 1,
118 118 : 3;
119 119
120 120 #ifndef PRODUCT
121 121 int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
122 122 #endif
123 123 // Entry point for calling both from and to the interpreter.
124 124 address _i2i_entry; // All-args-on-stack calling convention
125 125 // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
126 126 AdapterHandlerEntry* _adapter;
127 127 // Entry point for calling from compiled code, to compiled code if it exists
128 128 // or else the interpreter.
129 129 volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
130 130 // The entry point for calling both from and to compiled code is
131 131 // "_code->entry_point()". Because of tiered compilation and de-opt, this
132 132 // field can come and go. It can transition from NULL to not-null at any
133 133 // time (whenever a compile completes). It can transition from not-null to
134 134 // NULL only at safepoints (because of a de-opt).
135 135 nmethod* volatile _code; // Points to the corresponding piece of native code
136 136 volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
137 137
138 138 // Constructor
139 139 Method(ConstMethod* xconst, AccessFlags access_flags, int size);
140 140 public:
141 141
142 142 static Method* allocate(ClassLoaderData* loader_data,
143 143 int byte_code_size,
144 144 AccessFlags access_flags,
145 145 InlineTableSizes* sizes,
146 146 ConstMethod::MethodType method_type,
147 147 TRAPS);
148 148
149 149 // CDS and vtbl checking can create an empty Method to get vtbl pointer.
150 150 Method(){}
151 151
152 152 // The Method vtable is restored by this call when the Method is in the
153 153 // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
154 154 // all the gory details. SA, dtrace and pstack helpers distinguish metadata
155 155 // by their vtable.
156 156 void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
157 157 bool is_method() const volatile { return true; }
158 158
159 159 void restore_unshareable_info(TRAPS);
160 160
161 161 // accessors for instance variables
162 162
163 163 ConstMethod* constMethod() const { return _constMethod; }
164 164 void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }
165 165
166 166
167 167 static address make_adapters(methodHandle mh, TRAPS);
168 168 volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
169 169 volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
170 170
171 171 // access flag
172 172 AccessFlags access_flags() const { return _access_flags; }
173 173 void set_access_flags(AccessFlags flags) { _access_flags = flags; }
174 174
175 175 // name
176 176 Symbol* name() const { return constants()->symbol_at(name_index()); }
177 177 int name_index() const { return constMethod()->name_index(); }
178 178 void set_name_index(int index) { constMethod()->set_name_index(index); }
179 179
180 180 // signature
181 181 Symbol* signature() const { return constants()->symbol_at(signature_index()); }
182 182 int signature_index() const { return constMethod()->signature_index(); }
183 183 void set_signature_index(int index) { constMethod()->set_signature_index(index); }
184 184
185 185 // generics support
186 186 Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
187 187 int generic_signature_index() const { return constMethod()->generic_signature_index(); }
188 188 void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }
189 189
190 190 // annotations support
191 191 AnnotationArray* annotations() const {
192 192 return constMethod()->method_annotations();
193 193 }
194 194 AnnotationArray* parameter_annotations() const {
195 195 return constMethod()->parameter_annotations();
196 196 }
197 197 AnnotationArray* annotation_default() const {
198 198 return constMethod()->default_annotations();
199 199 }
200 200 AnnotationArray* type_annotations() const {
201 201 return constMethod()->type_annotations();
202 202 }
203 203
204 204 #ifdef CC_INTERP
205 205 void set_result_index(BasicType type);
206 206 int result_index() { return _result_index; }
207 207 #endif
208 208
209 209 // Helper routine: get klass name + "." + method name + signature as
210 210 // C string, for the purpose of providing more useful NoSuchMethodErrors
211 211 // and fatal error handling. The string is allocated in resource
212 212 // area if a buffer is not provided by the caller.
213 213 char* name_and_sig_as_C_string() const;
214 214 char* name_and_sig_as_C_string(char* buf, int size) const;
215 215
216 216 // Static routine in the situations we don't have a Method*
217 217 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
218 218 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);
219 219
220 220 Bytecodes::Code java_code_at(int bci) const {
221 221 return Bytecodes::java_code_at(this, bcp_from(bci));
222 222 }
223 223 Bytecodes::Code code_at(int bci) const {
224 224 return Bytecodes::code_at(this, bcp_from(bci));
225 225 }
226 226
227 227 // JVMTI breakpoints
228 228 Bytecodes::Code orig_bytecode_at(int bci) const;
229 229 void set_orig_bytecode_at(int bci, Bytecodes::Code code);
230 230 void set_breakpoint(int bci);
231 231 void clear_breakpoint(int bci);
232 232 void clear_all_breakpoints();
233 233 // Tracking number of breakpoints, for fullspeed debugging.
234 234 // Only mutated by VM thread.
235 235 u2 number_of_breakpoints() const {
236 236 MethodCounters* mcs = method_counters();
237 237 if (mcs == NULL) {
238 238 return 0;
239 239 } else {
240 240 return mcs->number_of_breakpoints();
241 241 }
242 242 }
243 243 void incr_number_of_breakpoints(TRAPS) {
244 244 MethodCounters* mcs = get_method_counters(CHECK);
245 245 if (mcs != NULL) {
246 246 mcs->incr_number_of_breakpoints();
247 247 }
248 248 }
249 249 void decr_number_of_breakpoints(TRAPS) {
250 250 MethodCounters* mcs = get_method_counters(CHECK);
251 251 if (mcs != NULL) {
252 252 mcs->decr_number_of_breakpoints();
253 253 }
254 254 }
255 255 // Initialization only
256 256 void clear_number_of_breakpoints() {
257 257 MethodCounters* mcs = method_counters();
258 258 if (mcs != NULL) {
259 259 mcs->clear_number_of_breakpoints();
260 260 }
261 261 }
262 262
263 263 // index into InstanceKlass methods() array
264 264 // note: also used by jfr
265 265 u2 method_idnum() const { return constMethod()->method_idnum(); }
266 266 void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }
267 267
268 268 // code size
269 269 int code_size() const { return constMethod()->code_size(); }
270 270
271 271 // method size
272 272 int method_size() const { return _method_size; }
273 273 void set_method_size(int size) {
274 274 assert(0 <= size && size < (1 << 16), "invalid method size");
275 275 _method_size = size;
276 276 }
277 277
278 278 // constant pool for Klass* holding this method
279 279 ConstantPool* constants() const { return constMethod()->constants(); }
280 280 void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }
281 281
282 282 // max stack
283 283 // return original max stack size for method verification
284 284 int verifier_max_stack() const { return constMethod()->max_stack(); }
285 285 int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
286 286 void set_max_stack(int size) { constMethod()->set_max_stack(size); }
287 287
288 288 // max locals
289 289 int max_locals() const { return constMethod()->max_locals(); }
290 290 void set_max_locals(int size) { constMethod()->set_max_locals(size); }
291 291
292 292 int highest_comp_level() const;
293 293 void set_highest_comp_level(int level);
294 294 int highest_osr_comp_level() const;
295 295 void set_highest_osr_comp_level(int level);
296 296
297 297 // Count of times method was exited via exception while interpreting
298 298 void interpreter_throwout_increment(TRAPS) {
299 299 MethodCounters* mcs = get_method_counters(CHECK);
300 300 if (mcs != NULL) {
301 301 mcs->interpreter_throwout_increment();
302 302 }
303 303 }
304 304
305 305 int interpreter_throwout_count() const {
306 306 MethodCounters* mcs = method_counters();
307 307 if (mcs == NULL) {
308 308 return 0;
309 309 } else {
310 310 return mcs->interpreter_throwout_count();
311 311 }
312 312 }
313 313
314 314 // size of parameters
315 315 int size_of_parameters() const { return constMethod()->size_of_parameters(); }
316 316 void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }
317 317
318 318 bool has_stackmap_table() const {
319 319 return constMethod()->has_stackmap_table();
320 320 }
321 321
322 322 Array<u1>* stackmap_data() const {
323 323 return constMethod()->stackmap_data();
324 324 }
325 325
326 326 void set_stackmap_data(Array<u1>* sd) {
327 327 constMethod()->set_stackmap_data(sd);
328 328 }
329 329
330 330 // exception handler table
331 331 bool has_exception_handler() const
332 332 { return constMethod()->has_exception_handler(); }
333 333 int exception_table_length() const
334 334 { return constMethod()->exception_table_length(); }
335 335 ExceptionTableElement* exception_table_start() const
336 336 { return constMethod()->exception_table_start(); }
337 337
338 338 // Finds the first entry point bci of an exception handler for an
339 339 // exception of klass ex_klass thrown at throw_bci. A value of NULL
340 340 // for ex_klass indicates that the exception klass is not known; in
341 341 // this case it matches any constraint class. Returns -1 if the
342 342 // exception cannot be handled in this method. The handler
343 343 // constraint classes are loaded if necessary. Note that this may
344 344 // throw an exception if loading of the constraint classes causes
345 345 // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
346 346 // If an exception is thrown, returns the bci of the
347 347 // exception handler which caused the exception to be thrown, which
348 348 // is needed for proper retries. See, for example,
349 349 // InterpreterRuntime::exception_handler_for_exception.
350 350 static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
351 351
352 352 // method data access
353 353 MethodData* method_data() const {
354 354 return _method_data;
355 355 }
356 356
357 357 void set_method_data(MethodData* data) {
358 358 // The store into method must be released. On platforms without
359 359 // total store order (TSO) the reference may become visible before
360 360 // the initialization of data otherwise.
361 361 OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
362 362 }
363 363
364 364 MethodCounters* method_counters() const {
365 365 return _method_counters;
366 366 }
367 367
368 368 void clear_method_counters() {
369 369 _method_counters = NULL;
370 370 }
371 371
372 372 bool init_method_counters(MethodCounters* counters) {
373 373 // Try to install a pointer to MethodCounters, return true on success.
374 374 return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
375 375 }
376 376
377 377 #ifdef TIERED
378 378 // We are reusing interpreter_invocation_count as a holder for the previous event count!
379 379 // We can do that since interpreter_invocation_count is not used in tiered.
380 380 int prev_event_count() const {
381 381 if (method_counters() == NULL) {
382 382 return 0;
383 383 } else {
384 384 return method_counters()->interpreter_invocation_count();
385 385 }
386 386 }
387 387 void set_prev_event_count(int count) {
388 388 MethodCounters* mcs = method_counters();
389 389 if (mcs != NULL) {
390 390 mcs->set_interpreter_invocation_count(count);
391 391 }
392 392 }
393 393 jlong prev_time() const {
394 394 MethodCounters* mcs = method_counters();
395 395 return mcs == NULL ? 0 : mcs->prev_time();
396 396 }
397 397 void set_prev_time(jlong time) {
398 398 MethodCounters* mcs = method_counters();
399 399 if (mcs != NULL) {
400 400 mcs->set_prev_time(time);
401 401 }
402 402 }
403 403 float rate() const {
404 404 MethodCounters* mcs = method_counters();
405 405 return mcs == NULL ? 0 : mcs->rate();
406 406 }
407 407 void set_rate(float rate) {
408 408 MethodCounters* mcs = method_counters();
409 409 if (mcs != NULL) {
410 410 mcs->set_rate(rate);
411 411 }
412 412 }
413 413 #endif
414 414
415 415 int invocation_count();
416 416 int backedge_count();
417 417
418 418 bool was_executed_more_than(int n);
419 419 bool was_never_executed() { return !was_executed_more_than(0); }
420 420
421 421 static void build_interpreter_method_data(methodHandle method, TRAPS);
422 422
423 423 static MethodCounters* build_method_counters(Method* m, TRAPS);
424 424
425 425 int interpreter_invocation_count() {
426 426 if (TieredCompilation) {
427 427 return invocation_count();
428 428 } else {
429 429 MethodCounters* mcs = method_counters();
430 430 return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
431 431 }
432 432 }
433 433 int increment_interpreter_invocation_count(TRAPS) {
434 434 if (TieredCompilation) ShouldNotReachHere();
435 435 MethodCounters* mcs = get_method_counters(CHECK_0);
436 436 return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
437 437 }
438 438
439 439 #ifndef PRODUCT
440 440 int compiled_invocation_count() const { return _compiled_invocation_count; }
441 441 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
442 442 #endif // not PRODUCT
443 443
444 444 // Clear (non-shared space) pointers which could not be relevant
445 445 // if this (shared) method were mapped into another JVM.
446 446 void remove_unshareable_info();
447 447
448 448 // nmethod/verified compiler entry
449 449 address verified_code_entry();
450 450 bool check_code() const; // Not inline to avoid circular ref
451 451 nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
452 452 void clear_code(); // Clear out any compiled code
453 453 static void set_code(methodHandle mh, nmethod* code);
454 454 void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
455 455 address get_i2c_entry();
456 456 address get_c2i_entry();
457 457 address get_c2i_unverified_entry();
458 458 AdapterHandlerEntry* adapter() { return _adapter; }
459 459 // setup entry points
460 460 void link_method(methodHandle method, TRAPS);
461 461 // clear entry points. Used by sharing code
462 462 void unlink_method();
463 463
464 464 // vtable index
465 465 enum VtableIndexFlag {
466 466 // Valid vtable indexes are non-negative (>= 0).
467 467 // These few negative values are used as sentinels.
468 468 itable_index_max = -10, // first itable index, growing downward
469 469 pending_itable_index = -9, // itable index will be assigned
470 470 invalid_vtable_index = -4, // distinct from any valid vtable index
471 471 garbage_vtable_index = -3, // not yet linked; no vtable layout yet
472 472 nonvirtual_vtable_index = -2 // there is no need for vtable dispatch
473 473 // 6330203 Note: Do not use -1, which was overloaded with many meanings.
474 474 };
475 475 DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
476 476 bool has_vtable_index() const { return _vtable_index >= 0; }
477 477 int vtable_index() const { return _vtable_index; }
478 478 void set_vtable_index(int index) { _vtable_index = index; }
479 479 DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
480 480 bool has_itable_index() const { return _vtable_index <= itable_index_max; }
481 481 int itable_index() const { assert(valid_itable_index(), "");
482 482 return itable_index_max - _vtable_index; }
483 483 void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
484 484
485 485 // interpreter entry
486 486 address interpreter_entry() const { return _i2i_entry; }
487 487 // Only used when first initialize so we can set _i2i_entry and _from_interpreted_entry
488 488 void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }
489 489
490 490 // native function (used for native methods only)
491 491 enum {
492 492 native_bind_event_is_interesting = true
493 493 };
494 494 address native_function() const { return *(native_function_addr()); }
495 495 address critical_native_function();
496 496
497 497 // Must specify a real function (not NULL).
498 498 // Use clear_native_function() to unregister.
499 499 void set_native_function(address function, bool post_event_flag);
500 500 bool has_native_function() const;
501 501 void clear_native_function();
502 502
503 503 // signature handler (used for native methods only)
504 504 address signature_handler() const { return *(signature_handler_addr()); }
505 505 void set_signature_handler(address handler);
506 506
507 507 // Interpreter oopmap support
508 508 void mask_for(int bci, InterpreterOopMap* mask);
509 509
510 510 #ifndef PRODUCT
511 511 // operations on invocation counter
512 512 void print_invocation_count();
513 513 #endif
514 514
515 515 // byte codes
516 516 void set_code(address code) { return constMethod()->set_code(code); }
517 517 address code_base() const { return constMethod()->code_base(); }
518 518 bool contains(address bcp) const { return constMethod()->contains(bcp); }
519 519
520 520 // prints byte codes
521 521 void print_codes() const { print_codes_on(tty); }
522 522 void print_codes_on(outputStream* st) const PRODUCT_RETURN;
523 523 void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
524 524
525 525 // method parameters
526 526 bool has_method_parameters() const
527 527 { return constMethod()->has_method_parameters(); }
528 528 int method_parameters_length() const
529 529 { return constMethod()->method_parameters_length(); }
530 530 MethodParametersElement* method_parameters_start() const
531 531 { return constMethod()->method_parameters_start(); }
532 532
533 533 // checked exceptions
534 534 int checked_exceptions_length() const
535 535 { return constMethod()->checked_exceptions_length(); }
536 536 CheckedExceptionElement* checked_exceptions_start() const
537 537 { return constMethod()->checked_exceptions_start(); }
538 538
539 539 // localvariable table
540 540 bool has_localvariable_table() const
541 541 { return constMethod()->has_localvariable_table(); }
542 542 int localvariable_table_length() const
543 543 { return constMethod()->localvariable_table_length(); }
544 544 LocalVariableTableElement* localvariable_table_start() const
545 545 { return constMethod()->localvariable_table_start(); }
546 546
547 547 bool has_linenumber_table() const
548 548 { return constMethod()->has_linenumber_table(); }
549 549 u_char* compressed_linenumber_table() const
550 550 { return constMethod()->compressed_linenumber_table(); }
551 551
552 552 // method holder (the Klass* holding this method)
553 553 InstanceKlass* method_holder() const { return constants()->pool_holder(); }
554 554
555 555 void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
556 556 Symbol* klass_name() const; // returns the name of the method holder
557 557 BasicType result_type() const; // type of the method result
558 558 int result_type_index() const; // type index of the method result
559 559 bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
560 560 bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
561 561
562 562 // Checked exceptions thrown by this method (resolved to mirrors)
563 563 objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }
564 564
565 565 // Access flags
566 566 bool is_public() const { return access_flags().is_public(); }
567 567 bool is_private() const { return access_flags().is_private(); }
568 568 bool is_protected() const { return access_flags().is_protected(); }
569 569 bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
570 570 bool is_static() const { return access_flags().is_static(); }
571 571 bool is_final() const { return access_flags().is_final(); }
572 572 bool is_synchronized() const { return access_flags().is_synchronized();}
573 573 bool is_native() const { return access_flags().is_native(); }
574 574 bool is_abstract() const { return access_flags().is_abstract(); }
575 575 bool is_strict() const { return access_flags().is_strict(); }
576 576 bool is_synthetic() const { return access_flags().is_synthetic(); }
577 577
578 578 // returns true if contains only return operation
579 579 bool is_empty_method() const;
580 580
581 581 // returns true if this is a vanilla constructor
582 582 bool is_vanilla_constructor() const;
583 583
584 584 // checks method and its method holder
585 585 bool is_final_method() const;
586 586 bool is_final_method(AccessFlags class_access_flags) const;
587 587 bool is_default_method() const;
588 588
589 589 // true if method needs no dynamic dispatch (final and/or no vtable entry)
590 590 bool can_be_statically_bound() const;
591 591 bool can_be_statically_bound(AccessFlags class_access_flags) const;
592 592
593 593 // returns true if the method has any backward branches.
594 594 bool has_loops() {
595 595 return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
596 596 };
597 597
598 598 bool compute_has_loops_flag();
599 599
600 600 bool has_jsrs() {
601 601 return access_flags().has_jsrs();
602 602 };
603 603 void set_has_jsrs() {
604 604 _access_flags.set_has_jsrs();
605 605 }
606 606
607 607 // returns true if the method has any monitors.
608 608 bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
609 609 bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }
610 610
611 611 void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }
612 612
613 613 // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
614 614 // properly nest in the method. It might return false, even though they actually nest properly, since the info.
615 615 // has not been computed yet.
616 616 bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
617 617 void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }
618 618
619 619 // returns true if the method is an accessor function (setter/getter).
620 620 bool is_accessor() const;
621 621
622 622 // returns true if the method is an initializer (<init> or <clinit>).
623 623 bool is_initializer() const;
624 624
625 625 // returns true if the method is static OR if the classfile version < 51
626 626 bool has_valid_initializer_flags() const;
627 627
628 628 // returns true if the method name is <clinit> and the method has
629 629 // valid static initializer flags.
630 630 bool is_static_initializer() const;
631 631
632 632 // compiled code support
633 633 // NOTE: code() is inherently racy as deopt can be clearing code
634 634 // simultaneously. Use with caution.
635 635 bool has_compiled_code() const { return code() != NULL; }
636 636
637 637 // sizing
638 638 static int header_size() { return sizeof(Method)/HeapWordSize; }
639 639 static int size(bool is_native);
640 640 int size() const { return method_size(); }
641 641 #if INCLUDE_SERVICES
642 642 void collect_statistics(KlassSizeStats *sz) const;
643 643 #endif
644 644
645 645 // interpreter support
646 646 static ByteSize const_offset() { return byte_offset_of(Method, _constMethod ); }
647 647 static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); }
648 648 #ifdef CC_INTERP
649 649 static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); }
650 650 #endif /* CC_INTERP */
651 651 static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
652 652 static ByteSize code_offset() { return byte_offset_of(Method, _code); }
653 653 static ByteSize method_data_offset() {
654 654 return byte_offset_of(Method, _method_data);
655 655 }
656 656 static ByteSize method_counters_offset() {
657 657 return byte_offset_of(Method, _method_counters);
658 658 }
659 659 #ifndef PRODUCT
660 660 static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
661 661 #endif // not PRODUCT
662 662 static ByteSize native_function_offset() { return in_ByteSize(sizeof(Method)); }
663 663 static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry ); }
664 664 static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); }
665 665 static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
666 666
667 667 // for code generation
668 668 static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
669 669 static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
670 670 static int intrinsic_id_size_in_bytes() { return sizeof(u1); }
671 671
672 672 // Static methods that are used to implement member methods where an exposed this pointer
673 673 // is needed due to possible GCs
674 674 static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);
675 675
676 676 // Returns the byte code index from the byte code pointer
677 677 int bci_from(address bcp) const;
678 678 address bcp_from(int bci) const;
679 679 int validate_bci_from_bcx(intptr_t bcx) const;
680 680
681 681 // Returns the line number for a bci if debugging information for the method is provided,
682 682 // -1 is returned otherwise.
683 683 int line_number_from_bci(int bci) const;
684 684
685 685 // Reflection support
686 686 bool is_overridden_in(Klass* k) const;
687 687
688 688 // Stack walking support
689 689 bool is_ignored_by_security_stack_walk() const;
690 690
691 691 // JSR 292 support
692 692 bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
693 693 bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
694 694 bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
695 695 static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
696 696 Symbol* signature, //anything at all
697 697 TRAPS);
698 698 static Klass* check_non_bcp_klass(Klass* klass);
699 699
700 700 // How many extra stack entries for invokedynamic when it's enabled
701 701 static const int extra_stack_entries_for_jsr292 = 1;
702 702
703 703 // this operates only on invoke methods:
704 704 // presize interpreter frames for extra interpreter stack entries, if needed
705 705 // Account for the extra appendix argument for invokehandle/invokedynamic
706 706 static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
707 707 static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize
708 708
709 709 // RedefineClasses() support:
710 710 bool is_old() const { return access_flags().is_old(); }
711 711 void set_is_old() { _access_flags.set_is_old(); }
712 712 bool is_obsolete() const { return access_flags().is_obsolete(); }
713 713 void set_is_obsolete() { _access_flags.set_is_obsolete(); }
714 714 bool on_stack() const { return access_flags().on_stack(); }
715 715 void set_on_stack(const bool value);
716 716
717 717 // see the definition in Method*.cpp for the gory details
718 718 bool should_not_be_cached() const;
719 719
720 720 // JVMTI Native method prefixing support:
721 721 bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
722 722 void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }
723 723
724 724 // Rewriting support
725 725 static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
726 726 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
727 727
728 728 // jmethodID handling
729 729 // Because the useful life-span of a jmethodID cannot be determined,
730 730 // once created they are never reclaimed. The methods to which they refer,
731 731 // however, can be GC'ed away if the class is unloaded or if the method is
732 732 // made obsolete or deleted -- in these cases, the jmethodID
733 733 // refers to NULL (as is the case for any weak reference).
734 734 static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
735 735 static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
736 736
737 737 // Use resolve_jmethod_id() in situations where the caller is expected
738 738 // to provide a valid jmethodID; the only sanity checks are in asserts;
739 739 // result guaranteed not to be NULL.
740 740 inline static Method* resolve_jmethod_id(jmethodID mid) {
741 741 assert(mid != NULL, "JNI method id should not be null");
742 742 return *((Method**)mid);
743 743 }
744 744
745 745 // Use checked_resolve_jmethod_id() in situations where the caller
746 746 // should provide a valid jmethodID, but might not. NULL is returned
747 747 // when the jmethodID does not refer to a valid method.
748 748 static Method* checked_resolve_jmethod_id(jmethodID mid);
749 749
750 750 static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
751 751 static bool is_method_id(jmethodID mid);
752 752
753 753 // Clear methods
754 754 static void clear_jmethod_ids(ClassLoaderData* loader_data);
755 755 static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
756 756
757 757 // Get this method's jmethodID -- allocate if it doesn't exist
758 758 jmethodID jmethod_id() { methodHandle this_h(this);
759 759 return InstanceKlass::get_jmethod_id(method_holder(), this_h); }
760 760
761 761 // Lookup the jmethodID for this method. Return NULL if not found.
762 762 // NOTE that this function can be called from a signal handler
763 763 // (see AsyncGetCallTrace support for Forte Analyzer) and this
764 764 // needs to be async-safe. No allocation should be done and
765 765 // so handles are not used to avoid deadlock.
766 766 jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }
767 767
768 768 // Support for inlining of intrinsic methods
769 769 vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
770 770 void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
771 771
772 772 // Helper routines for intrinsic_id() and vmIntrinsics::method().
773 773 void init_intrinsic_id(); // updates from _none if a match
774 774 static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
775 775
776 776 bool jfr_towrite() { return _jfr_towrite; }
777 777 void set_jfr_towrite(bool x) { _jfr_towrite = x; }
778 778 bool caller_sensitive() { return _caller_sensitive; }
779 779 void set_caller_sensitive(bool x) { _caller_sensitive = x; }
780 780 bool force_inline() { return _force_inline; }
781 781 void set_force_inline(bool x) { _force_inline = x; }
782 782 bool dont_inline() { return _dont_inline; }
783 783 void set_dont_inline(bool x) { _dont_inline = x; }
784 784 bool is_hidden() { return _hidden; }
785 785 void set_hidden(bool x) { _hidden = x; }
786 786 ConstMethod::MethodType method_type() const {
787 787 return _constMethod->method_type();
788 788 }
789 789 bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }
790 790
791 791 // On-stack replacement support
792 792 bool has_osr_nmethod(int level, bool match_level) {
793 793 return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
794 794 }
795 795
796 + int mark_osr_nmethods() {
797 + return method_holder()->mark_osr_nmethods(this);
798 + }
799 +
796 800 nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
797 801 return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
798 802 }
799 803
800 804 // Inline cache support
801 805 void cleanup_inline_caches();
802 806
803 807 // Find if klass for method is loaded
804 808 bool is_klass_loaded_by_klass_index(int klass_index) const;
805 809 bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;
806 810
807 811 // Indicates whether compilation failed earlier for this method, or
808 812 // whether it is not compilable for another reason like having a
809 813 // breakpoint set in it.
810 814 bool is_not_compilable(int comp_level = CompLevel_any) const;
811 815 void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
812 816 void set_not_compilable_quietly(int comp_level = CompLevel_all) {
813 817 set_not_compilable(comp_level, false);
814 818 }
815 819 bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
816 820 void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
817 821 void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
818 822 set_not_osr_compilable(comp_level, false);
819 823 }
820 824 bool is_always_compilable() const;
821 825
822 826 private:
823 827 void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
824 828
825 829 public:
826 830 MethodCounters* get_method_counters(TRAPS) {
827 831 if (_method_counters == NULL) {
828 832 build_method_counters(this, CHECK_AND_CLEAR_NULL);
829 833 }
830 834 return _method_counters;
831 835 }
832 836
833 837 bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
834 838 void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
835 839 void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
836 840 bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
837 841 void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
838 842 void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); }
839 843
840 844 bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit
841 845 void set_not_c1_osr_compilable() { set_not_c1_compilable(); } // don't waste an accessFlags bit
842 846 void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); } // don't waste an accessFlags bit
843 847 bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
844 848 void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); }
845 849 void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); }
846 850
847 851 // Background compilation support
848 852 bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
849 853 void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
850 854 void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }
851 855
852 856 // Resolve all classes in signature, return 'true' if successful
853 857 static bool load_signature_classes(methodHandle m, TRAPS);
854 858
855 859 // Return true if not all classes referenced in the signature, including the return type, have been loaded
856 860 static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);
857 861
858 862 // Printing
859 863 void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
860 864 #if INCLUDE_JVMTI
861 865 void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
862 866 #else
863 867 void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
864 868 #endif
865 869
866 870 // Helper routine used for method sorting
867 871 static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
868 872
869 873 // Deallocation function for redefine classes or if an error occurs
870 874 void deallocate_contents(ClassLoaderData* loader_data);
871 875
872 876 // Printing
873 877 #ifndef PRODUCT
874 878 void print_on(outputStream* st) const;
875 879 #endif
876 880 void print_value_on(outputStream* st) const;
877 881
878 882 const char* internal_name() const { return "{method}"; }
879 883
880 884 // Check for valid method pointer
881 885 static bool has_method_vptr(const void* ptr);
882 886 bool is_valid_method() const;
883 887
884 888 // Verify
885 889 void verify() { verify_on(tty); }
886 890 void verify_on(outputStream* st);
887 891
888 892 private:
889 893
890 894 // Inlined elements
891 895 address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
892 896 address* signature_handler_addr() const { return native_function_addr() + 1; }
893 897 };
894 898
895 899
896 900 // Utility class for compressing line number tables
897 901
898 902 class CompressedLineNumberWriteStream: public CompressedWriteStream {
899 903 private:
900 904 int _bci;
901 905 int _line;
902 906 public:
903 907 // Constructor
904 908 CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
905 909 CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}
906 910
907 911 // Write (bci, line number) pair to stream
908 912 void write_pair_regular(int bci_delta, int line_delta);
909 913
910 914 inline void write_pair_inline(int bci, int line) {
911 915 int bci_delta = bci - _bci;
912 916 int line_delta = line - _line;
913 917 _bci = bci;
914 918 _line = line;
915 919 // Skip (0,0) deltas - they do not add information and conflict with terminator.
916 920 if (bci_delta == 0 && line_delta == 0) return;
917 921 // Check if bci is 5-bit and line number 3-bit unsigned.
918 922 if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
919 923 // Compress into single byte.
920 924 jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
921 925 // Check that value doesn't match escape character.
922 926 if (value != 0xFF) {
923 927 write_byte(value);
924 928 return;
925 929 }
926 930 }
927 931 write_pair_regular(bci_delta, line_delta);
928 932 }
929 933
930 934 // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
931 935 // Disabling optimization doesn't work for methods in header files
932 936 // so we force it to call through the non-optimized version in the .cpp.
933 937 // It's gross, but it's the only way we can ensure that all callers are
934 938 // fixed. _MSC_VER is defined by the windows compiler
935 939 #if defined(_M_AMD64) && _MSC_VER >= 1400
936 940 void write_pair(int bci, int line);
937 941 #else
938 942 void write_pair(int bci, int line) { write_pair_inline(bci, line); }
939 943 #endif
940 944
941 945 // Write end-of-stream marker
942 946 void write_terminator() { write_byte(0); }
943 947 };
944 948
945 949
946 950 // Utility class for decompressing line number tables
947 951
948 952 class CompressedLineNumberReadStream: public CompressedReadStream {
949 953 private:
950 954 int _bci;
951 955 int _line;
952 956 public:
953 957 // Constructor
954 958 CompressedLineNumberReadStream(u_char* buffer);
955 959 // Read (bci, line number) pair from stream. Returns false at end-of-stream.
956 960 bool read_pair();
957 961 // Accessing bci and line number (after calling read_pair)
958 962 int bci() const { return _bci; }
959 963 int line() const { return _line; }
960 964 };
961 965
962 966
963 967 /// Fast Breakpoints.
964 968
965 969 // If this structure gets more complicated (because bpts get numerous),
966 970 // move it into its own header.
967 971
968 972 // There is presently no provision for concurrent access
969 973 // to breakpoint lists, which is only OK for JVMTI because
970 974 // breakpoints are written only at safepoints, and are read
971 975 // concurrently only outside of safepoints.
972 976
973 977 class BreakpointInfo : public CHeapObj<mtClass> {
974 978 friend class VMStructs;
975 979 private:
976 980 Bytecodes::Code _orig_bytecode;
977 981 int _bci;
978 982 u2 _name_index; // of method
979 983 u2 _signature_index; // of method
980 984 BreakpointInfo* _next; // simple storage allocation
981 985
982 986 public:
983 987 BreakpointInfo(Method* m, int bci);
984 988
985 989 // accessors
986 990 Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
987 991 void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
988 992 int bci() { return _bci; }
989 993
990 994 BreakpointInfo* next() const { return _next; }
991 995 void set_next(BreakpointInfo* n) { _next = n; }
992 996
993 997 // helps for searchers
994 998 bool match(const Method* m, int bci) {
995 999 return bci == _bci && match(m);
996 1000 }
997 1001
998 1002 bool match(const Method* m) {
999 1003 return _name_index == m->name_index() &&
1000 1004 _signature_index == m->signature_index();
1001 1005 }
1002 1006
1003 1007 void set(Method* method);
1004 1008 void clear(Method* method);
1005 1009 };
1006 1010
1007 1011 // Utility class for access exception handlers
1008 1012 class ExceptionTable : public StackObj {
1009 1013 private:
1010 1014 ExceptionTableElement* _table;
1011 1015 u2 _length;
1012 1016
1013 1017 public:
1014 1018 ExceptionTable(const Method* m) {
1015 1019 if (m->has_exception_handler()) {
1016 1020 _table = m->exception_table_start();
1017 1021 _length = m->exception_table_length();
1018 1022 } else {
1019 1023 _table = NULL;
1020 1024 _length = 0;
1021 1025 }
1022 1026 }
1023 1027
1024 1028 int length() const {
1025 1029 return _length;
1026 1030 }
1027 1031
1028 1032 u2 start_pc(int idx) const {
1029 1033 assert(idx < _length, "out of bounds");
1030 1034 return _table[idx].start_pc;
1031 1035 }
1032 1036
1033 1037 void set_start_pc(int idx, u2 value) {
1034 1038 assert(idx < _length, "out of bounds");
1035 1039 _table[idx].start_pc = value;
1036 1040 }
1037 1041
1038 1042 u2 end_pc(int idx) const {
1039 1043 assert(idx < _length, "out of bounds");
1040 1044 return _table[idx].end_pc;
1041 1045 }
1042 1046
1043 1047 void set_end_pc(int idx, u2 value) {
1044 1048 assert(idx < _length, "out of bounds");
1045 1049 _table[idx].end_pc = value;
1046 1050 }
1047 1051
1048 1052 u2 handler_pc(int idx) const {
1049 1053 assert(idx < _length, "out of bounds");
1050 1054 return _table[idx].handler_pc;
1051 1055 }
1052 1056
1053 1057 void set_handler_pc(int idx, u2 value) {
1054 1058 assert(idx < _length, "out of bounds");
1055 1059 _table[idx].handler_pc = value;
1056 1060 }
1057 1061
1058 1062 u2 catch_type_index(int idx) const {
1059 1063 assert(idx < _length, "out of bounds");
1060 1064 return _table[idx].catch_type_index;
1061 1065 }
1062 1066
1063 1067 void set_catch_type_index(int idx, u2 value) {
1064 1068 assert(idx < _length, "out of bounds");
1065 1069 _table[idx].catch_type_index = value;
1066 1070 }
1067 1071 };
1068 1072
1069 1073 #endif // SHARE_VM_OOPS_METHODOOP_HPP