Print this page
rev 1083 : [mq]: indy.compiler.inline.patch
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/oops/methodOop.hpp
+++ new/src/share/vm/oops/methodOop.hpp
1 1 /*
2 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 // A methodOop represents a Java method.
26 26 //
27 27 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
28 28 // so keeping the size of this structure small has a big impact on footprint.
29 29 //
30 30 // We put all oops and method_size first for better gc cache locality.
31 31 //
32 32 // The actual bytecodes are inlined after the end of the methodOopDesc struct.
33 33 //
34 34 // There are bits in the access_flags telling whether inlined tables are present.
35 35 // Note that accessing the line number and local variable tables is not performance critical at all.
36 36 // Accessing the checked exceptions table is used by reflection, so we put that last to make access
37 37 // to it fast.
38 38 //
39 39 // The line number table is compressed and inlined following the byte codes. It is found as the first
40 40 // byte following the byte codes. The checked exceptions table and the local variable table are inlined
41 41 // after the line number table, and indexed from the end of the method. We do not compress the checked
42 42 // exceptions table since the average length is less than 2, and do not bother to compress the local
43 43 // variable table either since it is mostly absent.
44 44 //
45 45 // Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
46 46 //
47 47 // |------------------------------------------------------|
48 48 // | header |
49 49 // | klass |
50 50 // |------------------------------------------------------|
51 51 // | constMethodOop (oop) |
52 52 // | constants (oop) |
53 53 // |------------------------------------------------------|
54 54 // | methodData (oop) |
55 55 // | interp_invocation_count |
56 56 // |------------------------------------------------------|
57 57 // | access_flags |
58 58 // | vtable_index |
59 59 // |------------------------------------------------------|
60 60 // | result_index (C++ interpreter only) |
61 61 // |------------------------------------------------------|
62 62 // | method_size | max_stack |
63 63 // | max_locals | size_of_parameters |
64 64 // |------------------------------------------------------|
65 65 // | intrinsic_id, highest_tier | (unused) |
66 66 // |------------------------------------------------------|
67 67 // | throwout_count | num_breakpoints |
68 68 // |------------------------------------------------------|
69 69 // | invocation_counter |
70 70 // | backedge_counter |
71 71 // |------------------------------------------------------|
72 72 // | code (pointer) |
73 73 // | i2i (pointer) |
74 74 // | adapter (pointer) |
75 75 // | from_compiled_entry (pointer) |
76 76 // | from_interpreted_entry (pointer) |
77 77 // |------------------------------------------------------|
78 78 // | native_function (present only if native) |
79 79 // | signature_handler (present only if native) |
80 80 // |------------------------------------------------------|
81 81
82 82
83 83 class CheckedExceptionElement;
84 84 class LocalVariableTableElement;
85 85 class AdapterHandlerEntry;
86 86
87 87 class methodDataOopDesc;
88 88
89 89 class methodOopDesc : public oopDesc {
90 90 friend class methodKlass;
91 91 friend class VMStructs;
92 92 private:
93 93 constMethodOop _constMethod; // Method read-only data.
94 94 constantPoolOop _constants; // Constant pool
95 95 methodDataOop _method_data;
96 96 int _interpreter_invocation_count; // Count of times invoked
97 97 AccessFlags _access_flags; // Access flags
98 98 int _vtable_index; // vtable index of this method (see VtableIndexFlag)
99 99 // note: can have vtables with >2**16 elements (because of inheritance)
100 100 #ifdef CC_INTERP
101 101 int _result_index; // C++ interpreter needs for converting results to/from stack
102 102 #endif
103 103 u2 _method_size; // size of this object
104 104 u2 _max_stack; // Maximum number of entries on the expression stack
105 105 u2 _max_locals; // Number of local variables used by this method
106 106 u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
107 107 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
108 108 u1 _highest_tier_compile; // Highest compile level this method has ever seen.
109 109 u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
110 110 u2 _number_of_breakpoints; // fullspeed debugging support
111 111 InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
112 112   InvocationCounter _backedge_counter;   // Incremented before each backedge taken - used to trigger frequency-based optimizations
113 113 #ifndef PRODUCT
114 114 int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
115 115 #endif
116 116 // Entry point for calling both from and to the interpreter.
117 117 address _i2i_entry; // All-args-on-stack calling convention
118 118 // Adapter blob (i2c/c2i) for this methodOop. Set once when method is linked.
119 119 AdapterHandlerEntry* _adapter;
120 120 // Entry point for calling from compiled code, to compiled code if it exists
121 121 // or else the interpreter.
122 122 volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
123 123 // The entry point for calling both from and to compiled code is
124 124 // "_code->entry_point()". Because of tiered compilation and de-opt, this
125 125 // field can come and go. It can transition from NULL to not-null at any
126 126 // time (whenever a compile completes). It can transition from not-null to
127 127 // NULL only at safepoints (because of a de-opt).
128 128 nmethod* volatile _code; // Points to the corresponding piece of native code
129 129 volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
130 130
131 131 public:
132 132
133 133 static const bool IsUnsafeConc = false;
134 134 static const bool IsSafeConc = true;
135 135
136 136 // accessors for instance variables
137 137 constMethodOop constMethod() const { return _constMethod; }
138 138 void set_constMethod(constMethodOop xconst) { oop_store_without_check((oop*)&_constMethod, (oop)xconst); }
139 139
140 140
141 141 static address make_adapters(methodHandle mh, TRAPS);
142 142 volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
143 143 volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
144 144
145 145 // access flag
146 146 AccessFlags access_flags() const { return _access_flags; }
147 147 void set_access_flags(AccessFlags flags) { _access_flags = flags; }
148 148
149 149 // name
150 150 symbolOop name() const { return _constants->symbol_at(name_index()); }
151 151 int name_index() const { return constMethod()->name_index(); }
152 152 void set_name_index(int index) { constMethod()->set_name_index(index); }
153 153
154 154 // signature
155 155 symbolOop signature() const { return _constants->symbol_at(signature_index()); }
156 156 int signature_index() const { return constMethod()->signature_index(); }
157 157 void set_signature_index(int index) { constMethod()->set_signature_index(index); }
158 158
159 159 // generics support
160 160 symbolOop generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? _constants->symbol_at(idx) : (symbolOop)NULL); }
161 161 int generic_signature_index() const { return constMethod()->generic_signature_index(); }
162 162 void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }
163 163
164 164 // annotations support
165 165 typeArrayOop annotations() const { return instanceKlass::cast(method_holder())->get_method_annotations_of(method_idnum()); }
166 166 typeArrayOop parameter_annotations() const { return instanceKlass::cast(method_holder())->get_method_parameter_annotations_of(method_idnum()); }
167 167 typeArrayOop annotation_default() const { return instanceKlass::cast(method_holder())->get_method_default_annotations_of(method_idnum()); }
168 168
169 169 #ifdef CC_INTERP
170 170 void set_result_index(BasicType type);
171 171 int result_index() { return _result_index; }
172 172 #endif
173 173
174 174 // Helper routine: get klass name + "." + method name + signature as
175 175 // C string, for the purpose of providing more useful NoSuchMethodErrors
176 176 // and fatal error handling. The string is allocated in resource
177 177 // area if a buffer is not provided by the caller.
178 178 char* name_and_sig_as_C_string();
179 179 char* name_and_sig_as_C_string(char* buf, int size);
180 180
181 181 // Static routine in the situations we don't have a methodOop
182 182 static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature);
183 183 static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature, char* buf, int size);
184 184
185 185 // JVMTI breakpoints
186 186 Bytecodes::Code orig_bytecode_at(int bci);
187 187 void set_orig_bytecode_at(int bci, Bytecodes::Code code);
188 188 void set_breakpoint(int bci);
189 189 void clear_breakpoint(int bci);
190 190 void clear_all_breakpoints();
191 191 // Tracking number of breakpoints, for fullspeed debugging.
192 192 // Only mutated by VM thread.
193 193 u2 number_of_breakpoints() const { return _number_of_breakpoints; }
194 194 void incr_number_of_breakpoints() { ++_number_of_breakpoints; }
195 195 void decr_number_of_breakpoints() { --_number_of_breakpoints; }
196 196 // Initialization only
197 197 void clear_number_of_breakpoints() { _number_of_breakpoints = 0; }
198 198
199 199 // index into instanceKlass methods() array
200 200 u2 method_idnum() const { return constMethod()->method_idnum(); }
201 201 void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }
202 202
203 203 // code size
204 204 int code_size() const { return constMethod()->code_size(); }
205 205
206 206 // method size
207 207 int method_size() const { return _method_size; }
208 208 void set_method_size(int size) {
209 209 assert(0 <= size && size < (1 << 16), "invalid method size");
210 210 _method_size = size;
211 211 }
212 212
213 213 // constant pool for klassOop holding this method
214 214 constantPoolOop constants() const { return _constants; }
215 215 void set_constants(constantPoolOop c) { oop_store_without_check((oop*)&_constants, c); }
216 216
217 217 // max stack
218 218 int max_stack() const { return _max_stack; }
219 219 void set_max_stack(int size) { _max_stack = size; }
220 220
221 221 // max locals
222 222 int max_locals() const { return _max_locals; }
223 223 void set_max_locals(int size) { _max_locals = size; }
224 224 int highest_tier_compile() { return _highest_tier_compile;}
225 225 void set_highest_tier_compile(int level) { _highest_tier_compile = level;}
226 226
227 227 // Count of times method was exited via exception while interpreting
228 228 void interpreter_throwout_increment() {
229 229 if (_interpreter_throwout_count < 65534) {
230 230 _interpreter_throwout_count++;
231 231 }
232 232 }
233 233
234 234 int interpreter_throwout_count() const { return _interpreter_throwout_count; }
235 235 void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }
236 236
237 237 // size of parameters
238 238 int size_of_parameters() const { return _size_of_parameters; }
239 239
240 240 bool has_stackmap_table() const {
241 241 return constMethod()->has_stackmap_table();
242 242 }
243 243
244 244 typeArrayOop stackmap_data() const {
245 245 return constMethod()->stackmap_data();
246 246 }
247 247
248 248 // exception handler table
249 249 typeArrayOop exception_table() const
250 250 { return constMethod()->exception_table(); }
251 251 void set_exception_table(typeArrayOop e)
252 252 { constMethod()->set_exception_table(e); }
253 253 bool has_exception_handler() const
254 254 { return constMethod()->has_exception_handler(); }
255 255
256 256 // Finds the first entry point bci of an exception handler for an
257 257 // exception of klass ex_klass thrown at throw_bci. A value of NULL
258 258 // for ex_klass indicates that the exception klass is not known; in
259 259 // this case it matches any constraint class. Returns -1 if the
260 260 // exception cannot be handled in this method. The handler
261 261 // constraint classes are loaded if necessary. Note that this may
262 262 // throw an exception if loading of the constraint classes causes
263 263 // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
264 264 // If an exception is thrown, returns the bci of the
265 265 // exception handler which caused the exception to be thrown, which
266 266 // is needed for proper retries. See, for example,
267 267 // InterpreterRuntime::exception_handler_for_exception.
268 268 int fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS);
269 269
270 270 // method data access
271 271 methodDataOop method_data() const {
272 272 return _method_data;
273 273 }
274 274 void set_method_data(methodDataOop data) {
275 275 oop_store_without_check((oop*)&_method_data, (oop)data);
276 276 }
277 277
278 278 // invocation counter
279 279 InvocationCounter* invocation_counter() { return &_invocation_counter; }
280 280 InvocationCounter* backedge_counter() { return &_backedge_counter; }
281 281 int invocation_count() const { return _invocation_counter.count(); }
282 282 int backedge_count() const { return _backedge_counter.count(); }
283 283 bool was_executed_more_than(int n) const;
284 284 bool was_never_executed() const { return !was_executed_more_than(0); }
285 285
286 286 static void build_interpreter_method_data(methodHandle method, TRAPS);
287 287
288 288 int interpreter_invocation_count() const { return _interpreter_invocation_count; }
289 289 void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
290 290 int increment_interpreter_invocation_count() { return ++_interpreter_invocation_count; }
291 291
292 292 #ifndef PRODUCT
293 293 int compiled_invocation_count() const { return _compiled_invocation_count; }
294 294 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
295 295 #endif // not PRODUCT
296 296
297 297 // Clear (non-shared space) pointers which could not be relevant
298 298 // if this (shared) method were mapped into another JVM.
299 299 void remove_unshareable_info();
300 300
301 301 // nmethod/verified compiler entry
302 302 address verified_code_entry();
303 303 bool check_code() const; // Not inline to avoid circular ref
304 304 nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
305 305 void clear_code(); // Clear out any compiled code
306 306 void set_code(methodHandle mh, nmethod* code);
307 307 void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
308 308 address get_i2c_entry();
309 309 address get_c2i_entry();
310 310 address get_c2i_unverified_entry();
311 311 AdapterHandlerEntry* adapter() { return _adapter; }
312 312 // setup entry points
313 313 void link_method(methodHandle method, TRAPS);
314 314 // clear entry points. Used by sharing code
315 315 void unlink_method();
316 316
317 317 // vtable index
318 318 enum VtableIndexFlag {
319 319 // Valid vtable indexes are non-negative (>= 0).
320 320 // These few negative values are used as sentinels.
321 321 highest_unused_vtable_index_value = -5,
322 322 invalid_vtable_index = -4, // distinct from any valid vtable index
323 323 garbage_vtable_index = -3, // not yet linked; no vtable layout yet
324 324 nonvirtual_vtable_index = -2 // there is no need for vtable dispatch
325 325 // 6330203 Note: Do not use -1, which was overloaded with many meanings.
326 326 };
327 327 DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
328 328 int vtable_index() const { assert(valid_vtable_index(), "");
329 329 return _vtable_index; }
330 330 void set_vtable_index(int index) { _vtable_index = index; }
331 331
332 332 // interpreter entry
333 333 address interpreter_entry() const { return _i2i_entry; }
334 334 // Only used when first initialize so we can set _i2i_entry and _from_interpreted_entry
335 335 void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }
336 336 int interpreter_kind(void) {
337 337 return constMethod()->interpreter_kind();
338 338 }
339 339 void set_interpreter_kind();
340 340 void set_interpreter_kind(int kind) {
341 341 constMethod()->set_interpreter_kind(kind);
342 342 }
343 343
344 344 // native function (used for native methods only)
345 345 enum {
346 346 native_bind_event_is_interesting = true
347 347 };
348 348 address native_function() const { return *(native_function_addr()); }
349 349 // Must specify a real function (not NULL).
350 350 // Use clear_native_function() to unregister.
351 351 void set_native_function(address function, bool post_event_flag);
352 352 bool has_native_function() const;
353 353 void clear_native_function();
354 354
355 355 // signature handler (used for native methods only)
356 356 address signature_handler() const { return *(signature_handler_addr()); }
357 357 void set_signature_handler(address handler);
↓ open down ↓ |
357 lines elided |
↑ open up ↑ |
358 358
359 359 // Interpreter oopmap support
360 360 void mask_for(int bci, InterpreterOopMap* mask);
361 361
362 362 #ifndef PRODUCT
363 363 // operations on invocation counter
364 364 void print_invocation_count() const;
365 365 #endif
366 366
367 367 // byte codes
368 + void set_code(address code) { return constMethod()->set_code(code); }
368 369 address code_base() const { return constMethod()->code_base(); }
369 370 bool contains(address bcp) const { return constMethod()->contains(bcp); }
370 371
371 372 // prints byte codes
372 373 void print_codes() const { print_codes_on(tty); }
373 374 void print_codes_on(outputStream* st) const PRODUCT_RETURN;
374 375 void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
375 376
376 377 // checked exceptions
377 378 int checked_exceptions_length() const
378 379 { return constMethod()->checked_exceptions_length(); }
379 380 CheckedExceptionElement* checked_exceptions_start() const
380 381 { return constMethod()->checked_exceptions_start(); }
381 382
382 383 // localvariable table
383 384 bool has_localvariable_table() const
384 385 { return constMethod()->has_localvariable_table(); }
385 386 int localvariable_table_length() const
386 387 { return constMethod()->localvariable_table_length(); }
387 388 LocalVariableTableElement* localvariable_table_start() const
388 389 { return constMethod()->localvariable_table_start(); }
389 390
390 391 bool has_linenumber_table() const
391 392 { return constMethod()->has_linenumber_table(); }
392 393 u_char* compressed_linenumber_table() const
393 394 { return constMethod()->compressed_linenumber_table(); }
394 395
395 396 // method holder (the klassOop holding this method)
396 397 klassOop method_holder() const { return _constants->pool_holder(); }
397 398
398 399 void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
399 400 symbolOop klass_name() const; // returns the name of the method holder
400 401 BasicType result_type() const; // type of the method result
401 402 int result_type_index() const; // type index of the method result
402 403 bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
403 404 bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
404 405
405 406 // Checked exceptions thrown by this method (resolved to mirrors)
406 407 objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }
407 408
408 409 // Access flags
409 410 bool is_public() const { return access_flags().is_public(); }
410 411 bool is_private() const { return access_flags().is_private(); }
411 412 bool is_protected() const { return access_flags().is_protected(); }
412 413 bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
413 414 bool is_static() const { return access_flags().is_static(); }
414 415 bool is_final() const { return access_flags().is_final(); }
415 416 bool is_synchronized() const { return access_flags().is_synchronized();}
416 417 bool is_native() const { return access_flags().is_native(); }
417 418 bool is_abstract() const { return access_flags().is_abstract(); }
418 419 bool is_strict() const { return access_flags().is_strict(); }
419 420 bool is_synthetic() const { return access_flags().is_synthetic(); }
420 421
421 422 // returns true if contains only return operation
422 423 bool is_empty_method() const;
423 424
424 425 // returns true if this is a vanilla constructor
425 426 bool is_vanilla_constructor() const;
426 427
427 428 // checks method and its method holder
428 429 bool is_final_method() const;
429 430 bool is_strict_method() const;
430 431
431 432 // true if method needs no dynamic dispatch (final and/or no vtable entry)
432 433 bool can_be_statically_bound() const;
433 434
434 435 // returns true if the method has any backward branches.
435 436 bool has_loops() {
436 437 return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
437 438 };
438 439
439 440 bool compute_has_loops_flag();
440 441
441 442 bool has_jsrs() {
442 443 return access_flags().has_jsrs();
443 444 };
444 445 void set_has_jsrs() {
445 446 _access_flags.set_has_jsrs();
446 447 }
447 448
448 449 // returns true if the method has any monitors.
449 450 bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
450 451 bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }
451 452
452 453 void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }
453 454
454 455 // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
455 456   // properly nest in the method. It might return false, even though they actually nest properly, since the info
456 457   // has not been computed yet.
457 458 bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
458 459 void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }
459 460
460 461 // returns true if the method is an accessor function (setter/getter).
461 462 bool is_accessor() const;
462 463
463 464 // returns true if the method is an initializer (<init> or <clinit>).
464 465 bool is_initializer() const;
465 466
466 467 // compiled code support
467 468 // NOTE: code() is inherently racy as deopt can be clearing code
468 469 // simultaneously. Use with caution.
469 470 bool has_compiled_code() const { return code() != NULL; }
470 471
471 472 // sizing
472 473 static int object_size(bool is_native);
473 474 static int header_size() { return sizeof(methodOopDesc)/HeapWordSize; }
474 475 int object_size() const { return method_size(); }
475 476
476 477 bool object_is_parsable() const { return method_size() > 0; }
477 478
478 479 // interpreter support
479 480 static ByteSize const_offset() { return byte_offset_of(methodOopDesc, _constMethod ); }
480 481 static ByteSize constants_offset() { return byte_offset_of(methodOopDesc, _constants ); }
481 482 static ByteSize access_flags_offset() { return byte_offset_of(methodOopDesc, _access_flags ); }
482 483 #ifdef CC_INTERP
483 484 static ByteSize result_index_offset() { return byte_offset_of(methodOopDesc, _result_index ); }
484 485 #endif /* CC_INTERP */
485 486 static ByteSize size_of_locals_offset() { return byte_offset_of(methodOopDesc, _max_locals ); }
486 487 static ByteSize size_of_parameters_offset() { return byte_offset_of(methodOopDesc, _size_of_parameters); }
487 488 static ByteSize from_compiled_offset() { return byte_offset_of(methodOopDesc, _from_compiled_entry); }
488 489 static ByteSize code_offset() { return byte_offset_of(methodOopDesc, _code); }
489 490 static ByteSize invocation_counter_offset() { return byte_offset_of(methodOopDesc, _invocation_counter); }
490 491 static ByteSize backedge_counter_offset() { return byte_offset_of(methodOopDesc, _backedge_counter); }
491 492 static ByteSize method_data_offset() {
492 493 return byte_offset_of(methodOopDesc, _method_data);
493 494 }
494 495 static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(methodOopDesc, _interpreter_invocation_count); }
495 496 #ifndef PRODUCT
496 497 static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(methodOopDesc, _compiled_invocation_count); }
497 498 #endif // not PRODUCT
498 499 static ByteSize native_function_offset() { return in_ByteSize(sizeof(methodOopDesc)); }
499 500 static ByteSize from_interpreted_offset() { return byte_offset_of(methodOopDesc, _from_interpreted_entry ); }
500 501 static ByteSize interpreter_entry_offset() { return byte_offset_of(methodOopDesc, _i2i_entry ); }
501 502 static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(methodOopDesc) + wordSize); }
502 503 static ByteSize max_stack_offset() { return byte_offset_of(methodOopDesc, _max_stack ); }
503 504
504 505 // for code generation
505 506 static int method_data_offset_in_bytes() { return offset_of(methodOopDesc, _method_data); }
506 507 static int interpreter_invocation_counter_offset_in_bytes()
507 508 { return offset_of(methodOopDesc, _interpreter_invocation_count); }
508 509
509 510 // Static methods that are used to implement member methods where an exposed this pointer
510 511 // is needed due to possible GCs
511 512 static objArrayHandle resolved_checked_exceptions_impl(methodOop this_oop, TRAPS);
512 513
513 514 // Returns the byte code index from the byte code pointer
514 515 int bci_from(address bcp) const;
515 516 address bcp_from(int bci) const;
516 517 int validate_bci_from_bcx(intptr_t bcx) const;
517 518
518 519   // Returns the line number for a bci if debugging information for the method is provided,
519 520 // -1 is returned otherwise.
520 521 int line_number_from_bci(int bci) const;
521 522
522 523 // Reflection support
523 524 bool is_overridden_in(klassOop k) const;
524 525
525 526 // JSR 292 support
526 527 bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); }
527 528 static methodHandle make_invoke_method(KlassHandle holder,
528 529 symbolHandle signature,
529 530 Handle method_type,
530 531 TRAPS);
531 532 // these operate only on invoke methods:
532 533 oop method_handle_type() const;
533 534 static jint* method_type_offsets_chain(); // series of pointer-offsets, terminated by -1
534 535 // presize interpreter frames for extra interpreter stack entries, if needed
535 536 // method handles want to be able to push a few extra values (e.g., a bound receiver), and
536 537 // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
537 538 // all without checking for a stack overflow
538 539 static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); }
539 540 static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
540 541 // RedefineClasses() support:
541 542 bool is_old() const { return access_flags().is_old(); }
542 543 void set_is_old() { _access_flags.set_is_old(); }
543 544 bool is_obsolete() const { return access_flags().is_obsolete(); }
544 545 void set_is_obsolete() { _access_flags.set_is_obsolete(); }
545 546 // see the definition in methodOop.cpp for the gory details
546 547 bool should_not_be_cached() const;
547 548
548 549 // JVMTI Native method prefixing support:
549 550 bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
550 551 void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }
551 552
552 553 // Rewriting support
553 554 static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
554 555 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
555 556
556 557 // Get this method's jmethodID -- allocate if it doesn't exist
557 558 jmethodID jmethod_id() { methodHandle this_h(this);
558 559 return instanceKlass::get_jmethod_id(method_holder(), this_h); }
559 560
  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()            { return instanceKlass::cast(method_holder())->jmethod_id_or_null(this); }

  // JNI static invoke cached itable index accessors.
  // The cached index is stored in the method's holder, keyed by this
  // method's idnum.
  int cached_itable_index()                      { return instanceKlass::cast(method_holder())->cached_itable_index(method_idnum()); }
  void set_cached_itable_index(int index)        { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }

  // Support for inlining of intrinsic methods.
  // The id is stored narrowed to a u1; widened back on read.
  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);

  // On-stack replacement support
  bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
  nmethod* lookup_osr_nmethod_for(int bci)       { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
  void set_not_compilable(int comp_level = CompLevel_highest_tier);

  // OSR compilation is additionally blocked by its own access flag.
  bool is_not_osr_compilable() const             { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
  void set_not_osr_compilable()                  { _access_flags.set_not_osr_compilable(); }

  bool is_not_tier1_compilable() const           { return access_flags().is_not_tier1_compilable(); }
  void set_not_tier1_compilable()                { _access_flags.set_not_tier1_compilable(); }

  // Background compilation support (state kept in the access flags word)
  bool queued_for_compilation() const            { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }

  static methodOop method_from_bcp(address bcp);

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Returns true if not all classes referenced in the signature, including
  // the return type, have been loaded.
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st) /*PRODUCT_RETURN*/; // prints as klassname::methodname; Exposed so field engineers can debug VM
  void print_name(outputStream* st) PRODUCT_RETURN; // prints as "virtual void foo(int)"

  // Helper routine used for method sorting; the three annotation arrays are
  // permuted in lockstep with the methods array.
  static void sort_methods(objArrayOop methods,
                           objArrayOop methods_annotations,
                           objArrayOop methods_parameter_annotations,
                           objArrayOop methods_default_annotations,
                           bool idempotent = false);

  // size of parameters
  void set_size_of_parameters(int size)          { _size_of_parameters = size; }
 private:

  // Inlined elements: for native methods the native function pointer and the
  // signature handler are stored in the two words immediately following the
  // methodOopDesc itself (hence "this+1").
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }

  // Garbage collection support: addresses of the embedded oop fields.
  oop* adr_constMethod() const                   { return (oop*)&_constMethod; }
  oop* adr_constants() const                     { return (oop*)&_constants; }
  oop* adr_method_data() const                   { return (oop*)&_method_data; }
};
639 640
640 641
641 642 // Utility class for compressing line number tables
642 643
643 644 class CompressedLineNumberWriteStream: public CompressedWriteStream {
644 645 private:
645 646 int _bci;
646 647 int _line;
647 648 public:
648 649 // Constructor
649 650 CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
650 651 CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}
651 652
652 653 // Write (bci, line number) pair to stream
653 654 void write_pair_regular(int bci_delta, int line_delta);
654 655
655 656 inline void write_pair_inline(int bci, int line) {
656 657 int bci_delta = bci - _bci;
657 658 int line_delta = line - _line;
658 659 _bci = bci;
659 660 _line = line;
660 661 // Skip (0,0) deltas - they do not add information and conflict with terminator.
661 662 if (bci_delta == 0 && line_delta == 0) return;
662 663 // Check if bci is 5-bit and line number 3-bit unsigned.
663 664 if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
664 665 // Compress into single byte.
665 666 jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
666 667 // Check that value doesn't match escape character.
667 668 if (value != 0xFF) {
668 669 write_byte(value);
669 670 return;
670 671 }
671 672 }
672 673 write_pair_regular(bci_delta, line_delta);
673 674 }
674 675
675 676 // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
676 677 // Disabling optimization doesn't work for methods in header files
677 678 // so we force it to call through the non-optimized version in the .cpp.
678 679 // It's gross, but it's the only way we can ensure that all callers are
679 680 // fixed. MSC_VER is defined in build/windows/makefiles/compile.make.
680 681 #if defined(_M_AMD64) && MSC_VER >= 1400
681 682 void write_pair(int bci, int line);
682 683 #else
683 684 void write_pair(int bci, int line) { write_pair_inline(bci, line); }
684 685 #endif
685 686
686 687 // Write end-of-stream marker
687 688 void write_terminator() { write_byte(0); }
688 689 };
689 690
690 691
691 692 // Utility class for decompressing line number tables
692 693
class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;   // bci of the most recently read pair
  int _line;  // line number of the most recently read pair
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (valid only after a successful read_pair)
  int bci() const                               { return _bci; }
  int line() const                              { return _line; }
};
706 707
707 708
708 709 /// Fast Breakpoints.
709 710
710 711 // If this structure gets more complicated (because bpts get numerous),
711 712 // move it into its own header.
712 713
713 714 // There is presently no provision for concurrent access
714 715 // to breakpoint lists, which is only OK for JVMTI because
715 716 // breakpoints are written only at safepoints, and are read
716 717 // concurrently only outside of safepoints.
717 718
718 719 class BreakpointInfo : public CHeapObj {
719 720 friend class VMStructs;
720 721 private:
721 722 Bytecodes::Code _orig_bytecode;
722 723 int _bci;
723 724 u2 _name_index; // of method
724 725 u2 _signature_index; // of method
725 726 BreakpointInfo* _next; // simple storage allocation
726 727
727 728 public:
728 729 BreakpointInfo(methodOop m, int bci);
729 730
730 731 // accessors
731 732 Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
732 733 void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
733 734 int bci() { return _bci; }
734 735
735 736 BreakpointInfo* next() const { return _next; }
736 737 void set_next(BreakpointInfo* n) { _next = n; }
737 738
738 739 // helps for searchers
739 740 bool match(methodOop m, int bci) {
740 741 return bci == _bci && match(m);
741 742 }
742 743
743 744 bool match(methodOop m) {
744 745 return _name_index == m->name_index() &&
745 746 _signature_index == m->signature_index();
746 747 }
747 748
748 749 void set(methodOop method);
749 750 void clear(methodOop method);
750 751 };
↓ open down ↓ |
373 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX