1 /*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/gcLocker.hpp"
32 #include "gc/shared/generation.hpp"
33 #include "interpreter/bytecodeStream.hpp"
34 #include "interpreter/bytecodeTracer.hpp"
35 #include "interpreter/bytecodes.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "interpreter/oopMapCache.hpp"
38 #include "memory/heapInspection.hpp"
39 #include "memory/metadataFactory.hpp"
40 #include "memory/oopFactory.hpp"
41 #include "oops/constMethod.hpp"
42 #include "oops/method.hpp"
43 #include "oops/methodData.hpp"
44 #include "oops/objArrayOop.inline.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/symbol.hpp"
47 #include "prims/jvmtiExport.hpp"
48 #include "prims/methodHandles.hpp"
49 #include "prims/nativeLookup.hpp"
50 #include "runtime/arguments.hpp"
51 #include "runtime/compilationPolicy.hpp"
52 #include "runtime/frame.inline.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/orderAccess.inline.hpp"
55 #include "runtime/relocator.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/signature.hpp"
58 #include "utilities/quickSort.hpp"
59 #include "utilities/xmlstream.hpp"
60
61 // Implementation of Method
62
63 Method* Method::allocate(ClassLoaderData* loader_data,
64 int byte_code_size,
65 AccessFlags access_flags,
66 InlineTableSizes* sizes,
67 ConstMethod::MethodType method_type,
68 TRAPS) {
69 assert(!access_flags.is_native() || byte_code_size == 0,
70 "native methods should not contain byte codes");
71 ConstMethod* cm = ConstMethod::allocate(loader_data,
72 byte_code_size,
73 sizes,
74 method_type,
75 CHECK_NULL);
76 int size = Method::size(access_flags.is_native());
77 return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
78 }
79
80 Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
81 NoSafepointVerifier no_safepoint;
82 set_constMethod(xconst);
83 set_access_flags(access_flags);
84 #ifdef CC_INTERP
85 set_result_index(T_VOID);
86 #endif
87 set_intrinsic_id(vmIntrinsics::_none);
88 set_jfr_towrite(false);
89 set_force_inline(false);
90 set_hidden(false);
91 set_dont_inline(false);
92 set_has_injected_profile(false);
93 set_method_data(NULL);
94 clear_method_counters();
95 set_vtable_index(Method::garbage_vtable_index);
96
97 // Fix and bury in Method*
98 set_interpreter_entry(NULL); // sets i2i entry and from_int
99 set_adapter_entry(NULL);
100 clear_code(); // from_c/from_i get set to c2i/i2i
101
102 if (access_flags.is_native()) {
103 clear_native_function();
104 set_signature_handler(NULL);
105 }
106
107 NOT_PRODUCT(set_compiled_invocation_count(0);)
108 }
109
110 // Release Method*. The nmethod will be gone when we get here because
111 // we've walked the code cache.
112 void Method::deallocate_contents(ClassLoaderData* loader_data) {
113 MetadataFactory::free_metadata(loader_data, constMethod());
114 set_constMethod(NULL);
115 MetadataFactory::free_metadata(loader_data, method_data());
116 set_method_data(NULL);
117 MetadataFactory::free_metadata(loader_data, method_counters());
118 clear_method_counters();
119 // The nmethod will be gone when we get here.
120 if (code() != NULL) _code = NULL;
121 }
122
123 address Method::get_i2c_entry() {
124 assert(_adapter != NULL, "must have");
125 return _adapter->get_i2c_entry();
126 }
127
128 address Method::get_c2i_entry() {
129 assert(_adapter != NULL, "must have");
130 return _adapter->get_c2i_entry();
131 }
132
133 address Method::get_c2i_unverified_entry() {
134 assert(_adapter != NULL, "must have");
135 return _adapter->get_c2i_unverified_entry();
136 }
137
138 char* Method::name_and_sig_as_C_string() const {
139 return name_and_sig_as_C_string(constants()->pool_holder(), name(), signature());
140 }
141
142 char* Method::name_and_sig_as_C_string(char* buf, int size) const {
143 return name_and_sig_as_C_string(constants()->pool_holder(), name(), signature(), buf, size);
144 }
145
146 char* Method::name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature) {
147 const char* klass_name = klass->external_name();
148 int klass_name_len = (int)strlen(klass_name);
149 int method_name_len = method_name->utf8_length();
150 int len = klass_name_len + 1 + method_name_len + signature->utf8_length();
151 char* dest = NEW_RESOURCE_ARRAY(char, len + 1);
152 strcpy(dest, klass_name);
153 dest[klass_name_len] = '.';
154 strcpy(&dest[klass_name_len + 1], method_name->as_C_string());
155 strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string());
156 dest[len] = 0;
157 return dest;
158 }
159
160 char* Method::name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size) {
161 Symbol* klass_name = klass->name();
162 klass_name->as_klass_external_name(buf, size);
163 int len = (int)strlen(buf);
164
165 if (len < size - 1) {
166 buf[len++] = '.';
167
168 method_name->as_C_string(&(buf[len]), size - len);
169 len = (int)strlen(buf);
170
171 signature->as_C_string(&(buf[len]), size - len);
172 }
173
174 return buf;
175 }
176
177 int Method::fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS) {
178 // exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
179 // access exception table
180 ExceptionTable table(mh());
181 int length = table.length();
182 // iterate through all entries sequentially
183 constantPoolHandle pool(THREAD, mh->constants());
184 for (int i = 0; i < length; i ++) {
185 //reacquire the table in case a GC happened
186 ExceptionTable table(mh());
187 int beg_bci = table.start_pc(i);
188 int end_bci = table.end_pc(i);
189 assert(beg_bci <= end_bci, "inconsistent exception table");
190 if (beg_bci <= throw_bci && throw_bci < end_bci) {
191 // exception handler bci range covers throw_bci => investigate further
192 int handler_bci = table.handler_pc(i);
193 int klass_index = table.catch_type_index(i);
194 if (klass_index == 0) {
195 return handler_bci;
196 } else if (ex_klass.is_null()) {
197 return handler_bci;
198 } else {
199 // we know the exception class => get the constraint class
200 // this may require loading of the constraint class; if verification
201 // fails or some other exception occurs, return handler_bci
202 Klass* k = pool->klass_at(klass_index, CHECK_(handler_bci));
203 KlassHandle klass = KlassHandle(THREAD, k);
204 assert(klass.not_null(), "klass not loaded");
205 if (ex_klass->is_subtype_of(klass())) {
206 return handler_bci;
207 }
208 }
209 }
210 }
211
212 return -1;
213 }
214
215 void Method::mask_for(int bci, InterpreterOopMap* mask) {
216
217 Thread* myThread = Thread::current();
218 methodHandle h_this(myThread, this);
219 #if defined(ASSERT) && !INCLUDE_JVMCI
220 bool has_capability = myThread->is_VM_thread() ||
221 myThread->is_ConcurrentGC_thread() ||
222 myThread->is_GC_task_thread();
223
224 if (!has_capability) {
225 if (!VerifyStack && !VerifyLastFrame) {
226 // verify stack calls this outside VM thread
227 warning("oopmap should only be accessed by the "
228 "VM, GC task or CMS threads (or during debugging)");
229 InterpreterOopMap local_mask;
230 method_holder()->mask_for(h_this, bci, &local_mask);
231 local_mask.print();
232 }
233 }
234 #endif
235 method_holder()->mask_for(h_this, bci, mask);
236 return;
237 }
238
239
240 int Method::bci_from(address bcp) const {
241 if (is_native() && bcp == 0) {
242 return 0;
243 }
244 #ifdef ASSERT
245 {
246 ResourceMark rm;
247 assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(),
248 "bcp doesn't belong to this method: bcp: " INTPTR_FORMAT ", method: %s",
249 p2i(bcp), name_and_sig_as_C_string());
250 }
251 #endif
252 return bcp - code_base();
253 }
254
255
256 int Method::validate_bci(int bci) const {
257 return (bci == 0 || bci < code_size()) ? bci : -1;
258 }
259
260 // Return bci if it appears to be a valid bcp
261 // Return -1 otherwise.
262 // Used by profiling code, when invalid data is a possibility.
263 // The caller is responsible for validating the Method* itself.
264 int Method::validate_bci_from_bcp(address bcp) const {
265 // keep bci as -1 if not a valid bci
266 int bci = -1;
267 if (bcp == 0 || bcp == code_base()) {
268 // code_size() may return 0 and we allow 0 here
269 // the method may be native
270 bci = 0;
271 } else if (contains(bcp)) {
272 bci = bcp - code_base();
273 }
274 // Assert that if we have dodged any asserts, bci is negative.
275 assert(bci == -1 || bci == bci_from(bcp_from(bci)), "sane bci if >=0");
276 return bci;
277 }
278
279 address Method::bcp_from(int bci) const {
280 assert((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size()), "illegal bci: %d", bci);
281 address bcp = code_base() + bci;
282 assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
283 return bcp;
284 }
285
286 address Method::bcp_from(address bcp) const {
287 if (is_native() && bcp == NULL) {
288 return code_base();
289 } else {
290 return bcp;
291 }
292 }
293
294 int Method::size(bool is_native) {
295 // If native, then include pointers for native_function and signature_handler
296 int extra_bytes = (is_native) ? 2*sizeof(address*) : 0;
297 int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
298 return align_object_size(header_size() + extra_words);
299 }
300
301
302 Symbol* Method::klass_name() const {
303 return method_holder()->name();
304 }
305
306
307 // Attempt to return method oop to original state. Clear any pointers
308 // (to objects outside the shared spaces). We won't be able to predict
309 // where they should point in a new JVM. Further initialize some
310 // entries now in order allow them to be write protected later.
311
312 void Method::remove_unshareable_info() {
313 unlink_method();
314 }
315
316
317 bool Method::was_executed_more_than(int n) {
318 // Invocation counter is reset when the Method* is compiled.
319 // If the method has compiled code we therefore assume it has
320 // be excuted more than n times.
321 if (is_accessor() || is_empty_method() || (code() != NULL)) {
322 // interpreter doesn't bump invocation counter of trivial methods
323 // compiler does not bump invocation counter of compiled methods
324 return true;
325 }
326 else if ((method_counters() != NULL &&
327 method_counters()->invocation_counter()->carry()) ||
328 (method_data() != NULL &&
329 method_data()->invocation_counter()->carry())) {
330 // The carry bit is set when the counter overflows and causes
331 // a compilation to occur. We don't know how many times
332 // the counter has been reset, so we simply assume it has
333 // been executed more than n times.
334 return true;
335 } else {
336 return invocation_count() > n;
337 }
338 }
339
340 void Method::print_invocation_count() {
341 if (is_static()) tty->print("static ");
342 if (is_final()) tty->print("final ");
343 if (is_synchronized()) tty->print("synchronized ");
344 if (is_native()) tty->print("native ");
345 tty->print("%s::", method_holder()->external_name());
346 name()->print_symbol_on(tty);
347 signature()->print_symbol_on(tty);
348
349 if (WizardMode) {
350 // dump the size of the byte codes
351 tty->print(" {%d}", code_size());
352 }
353 tty->cr();
354
355 tty->print_cr (" interpreter_invocation_count: %8d ", interpreter_invocation_count());
356 tty->print_cr (" invocation_counter: %8d ", invocation_count());
357 tty->print_cr (" backedge_counter: %8d ", backedge_count());
358 #ifndef PRODUCT
359 if (CountCompiledCalls) {
360 tty->print_cr (" compiled_invocation_count: %8d ", compiled_invocation_count());
361 }
362 #endif
363 }
364
365 // Build a MethodData* object to hold information about this method
366 // collected in the interpreter.
367 void Method::build_interpreter_method_data(const methodHandle& method, TRAPS) {
368 // Do not profile the method if metaspace has hit an OOM previously
369 // allocating profiling data. Callers clear pending exception so don't
370 // add one here.
371 if (ClassLoaderDataGraph::has_metaspace_oom()) {
372 return;
373 }
374
375 // Do not profile method if current thread holds the pending list lock,
376 // which avoids deadlock for acquiring the MethodData_lock.
377 if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
378 return;
379 }
380
381 // Grab a lock here to prevent multiple
382 // MethodData*s from being created.
383 MutexLocker ml(MethodData_lock, THREAD);
384 if (method->method_data() == NULL) {
385 ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
386 MethodData* method_data = MethodData::allocate(loader_data, method, THREAD);
387 if (HAS_PENDING_EXCEPTION) {
388 CompileBroker::log_metaspace_failure();
389 ClassLoaderDataGraph::set_metaspace_oom(true);
390 return; // return the exception (which is cleared)
391 }
392
393 method->set_method_data(method_data);
394 if (PrintMethodData && (Verbose || WizardMode)) {
395 ResourceMark rm(THREAD);
396 tty->print("build_interpreter_method_data for ");
397 method->print_name(tty);
398 tty->cr();
399 // At the end of the run, the MDO, full of data, will be dumped.
400 }
401 }
402 }
403
404 MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
405 // Do not profile the method if metaspace has hit an OOM previously
406 if (ClassLoaderDataGraph::has_metaspace_oom()) {
407 return NULL;
408 }
409
410 methodHandle mh(m);
411 MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
412 if (HAS_PENDING_EXCEPTION) {
413 CompileBroker::log_metaspace_failure();
414 ClassLoaderDataGraph::set_metaspace_oom(true);
415 return NULL; // return the exception (which is cleared)
416 }
417 if (!mh->init_method_counters(counters)) {
418 MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters);
419 }
420
421 if (LogTouchedMethods) {
422 mh->log_touched(CHECK_NULL);
423 }
424
425 return mh->method_counters();
426 }
427
428 void Method::cleanup_inline_caches() {
429 // The current system doesn't use inline caches in the interpreter
430 // => nothing to do (keep this method around for future use)
431 }
432
433
434 int Method::extra_stack_words() {
435 // not an inline function, to avoid a header dependency on Interpreter
436 return extra_stack_entries() * Interpreter::stackElementSize;
437 }
438
439
440 void Method::compute_size_of_parameters(Thread *thread) {
441 ArgumentSizeComputer asc(signature());
442 set_size_of_parameters(asc.size() + (is_static() ? 0 : 1));
443 }
444
445 #ifdef CC_INTERP
446 void Method::set_result_index(BasicType type) {
447 _result_index = Interpreter::BasicType_as_index(type);
448 }
449 #endif
450
451 BasicType Method::result_type() const {
452 ResultTypeFinder rtf(signature());
453 return rtf.type();
454 }
455
456
457 bool Method::is_empty_method() const {
458 return code_size() == 1
459 && *code_base() == Bytecodes::_return;
460 }
461
462
463 bool Method::is_vanilla_constructor() const {
464 // Returns true if this method is a vanilla constructor, i.e. an "<init>" "()V" method
465 // which only calls the superclass vanilla constructor and possibly does stores of
466 // zero constants to local fields:
467 //
468 // aload_0
469 // invokespecial
470 // indexbyte1
471 // indexbyte2
472 //
473 // followed by an (optional) sequence of:
474 //
475 // aload_0
476 // aconst_null / iconst_0 / fconst_0 / dconst_0
477 // putfield
478 // indexbyte1
479 // indexbyte2
480 //
481 // followed by:
482 //
483 // return
484
485 assert(name() == vmSymbols::object_initializer_name(), "Should only be called for default constructors");
486 assert(signature() == vmSymbols::void_method_signature(), "Should only be called for default constructors");
487 int size = code_size();
488 // Check if size match
489 if (size == 0 || size % 5 != 0) return false;
490 address cb = code_base();
491 int last = size - 1;
492 if (cb[0] != Bytecodes::_aload_0 || cb[1] != Bytecodes::_invokespecial || cb[last] != Bytecodes::_return) {
493 // Does not call superclass default constructor
494 return false;
495 }
496 // Check optional sequence
497 for (int i = 4; i < last; i += 5) {
498 if (cb[i] != Bytecodes::_aload_0) return false;
499 if (!Bytecodes::is_zero_const(Bytecodes::cast(cb[i+1]))) return false;
500 if (cb[i+2] != Bytecodes::_putfield) return false;
501 }
502 return true;
503 }
504
505
506 bool Method::compute_has_loops_flag() {
507 BytecodeStream bcs(this);
508 Bytecodes::Code bc;
509
510 while ((bc = bcs.next()) >= 0) {
511 switch( bc ) {
512 case Bytecodes::_ifeq:
513 case Bytecodes::_ifnull:
514 case Bytecodes::_iflt:
515 case Bytecodes::_ifle:
516 case Bytecodes::_ifne:
517 case Bytecodes::_ifnonnull:
518 case Bytecodes::_ifgt:
519 case Bytecodes::_ifge:
520 case Bytecodes::_if_icmpeq:
521 case Bytecodes::_if_icmpne:
522 case Bytecodes::_if_icmplt:
523 case Bytecodes::_if_icmpgt:
524 case Bytecodes::_if_icmple:
525 case Bytecodes::_if_icmpge:
526 case Bytecodes::_if_acmpeq:
527 case Bytecodes::_if_acmpne:
528 case Bytecodes::_goto:
529 case Bytecodes::_jsr:
530 if( bcs.dest() < bcs.next_bci() ) _access_flags.set_has_loops();
531 break;
532
533 case Bytecodes::_goto_w:
534 case Bytecodes::_jsr_w:
535 if( bcs.dest_w() < bcs.next_bci() ) _access_flags.set_has_loops();
536 break;
537 }
538 }
539 _access_flags.set_loops_flag_init();
540 return _access_flags.has_loops();
541 }
542
543 bool Method::is_final_method(AccessFlags class_access_flags) const {
544 // or "does_not_require_vtable_entry"
545 // default method or overpass can occur, is not final (reuses vtable entry)
546 // private methods get vtable entries for backward class compatibility.
547 if (is_overpass() || is_default_method()) return false;
548 return is_final() || class_access_flags.is_final();
549 }
550
551 bool Method::is_final_method() const {
552 return is_final_method(method_holder()->access_flags());
553 }
554
555 bool Method::is_default_method() const {
556 if (method_holder() != NULL &&
557 method_holder()->is_interface() &&
558 !is_abstract()) {
559 return true;
560 } else {
561 return false;
562 }
563 }
564
565 bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
566 if (is_final_method(class_access_flags)) return true;
567 #ifdef ASSERT
568 ResourceMark rm;
569 bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
570 if (class_access_flags.is_interface()) {
571 assert(is_nonv == is_static(), "is_nonv=%s", name_and_sig_as_C_string());
572 }
573 #endif
574 assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
575 return vtable_index() == nonvirtual_vtable_index;
576 }
577
578 bool Method::can_be_statically_bound() const {
579 return can_be_statically_bound(method_holder()->access_flags());
580 }
581
582 bool Method::is_accessor() const {
583 return is_getter() || is_setter();
584 }
585
586 bool Method::is_getter() const {
587 if (code_size() != 5) return false;
588 if (size_of_parameters() != 1) return false;
589 if (java_code_at(0) != Bytecodes::_aload_0) return false;
590 if (java_code_at(1) != Bytecodes::_getfield) return false;
591 switch (java_code_at(4)) {
592 case Bytecodes::_ireturn:
593 case Bytecodes::_lreturn:
594 case Bytecodes::_freturn:
595 case Bytecodes::_dreturn:
596 case Bytecodes::_areturn:
597 break;
598 default:
599 return false;
600 }
601 return true;
602 }
603
604 bool Method::is_setter() const {
605 if (code_size() != 6) return false;
606 if (java_code_at(0) != Bytecodes::_aload_0) return false;
607 switch (java_code_at(1)) {
608 case Bytecodes::_iload_1:
609 case Bytecodes::_aload_1:
610 case Bytecodes::_fload_1:
611 if (size_of_parameters() != 2) return false;
612 break;
613 case Bytecodes::_dload_1:
614 case Bytecodes::_lload_1:
615 if (size_of_parameters() != 3) return false;
616 break;
617 default:
618 return false;
619 }
620 if (java_code_at(2) != Bytecodes::_putfield) return false;
621 if (java_code_at(5) != Bytecodes::_return) return false;
622 return true;
623 }
624
625 bool Method::is_constant_getter() const {
626 int last_index = code_size() - 1;
627 // Check if the first 1-3 bytecodes are a constant push
628 // and the last bytecode is a return.
629 return (2 <= code_size() && code_size() <= 4 &&
630 Bytecodes::is_const(java_code_at(0)) &&
631 Bytecodes::length_for(java_code_at(0)) == last_index &&
632 Bytecodes::is_return(java_code_at(last_index)));
633 }
634
635 bool Method::is_initializer() const {
636 return name() == vmSymbols::object_initializer_name() || is_static_initializer();
637 }
638
639 bool Method::has_valid_initializer_flags() const {
640 return (is_static() ||
641 method_holder()->major_version() < 51);
642 }
643
644 bool Method::is_static_initializer() const {
645 // For classfiles version 51 or greater, ensure that the clinit method is
646 // static. Non-static methods with the name "<clinit>" are not static
647 // initializers. (older classfiles exempted for backward compatibility)
648 return name() == vmSymbols::class_initializer_name() &&
649 has_valid_initializer_flags();
650 }
651
652
653 objArrayHandle Method::resolved_checked_exceptions_impl(Method* method, TRAPS) {
654 int length = method->checked_exceptions_length();
655 if (length == 0) { // common case
656 return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
657 } else {
658 methodHandle h_this(THREAD, method);
659 objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
660 objArrayHandle mirrors (THREAD, m_oop);
661 for (int i = 0; i < length; i++) {
662 CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
663 Klass* k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
664 assert(k->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
665 mirrors->obj_at_put(i, k->java_mirror());
666 }
667 return mirrors;
668 }
669 };
670
671
672 int Method::line_number_from_bci(int bci) const {
673 if (bci == SynchronizationEntryBCI) bci = 0;
674 assert(bci == 0 || 0 <= bci && bci < code_size(), "illegal bci");
675 int best_bci = 0;
676 int best_line = -1;
677
678 if (has_linenumber_table()) {
679 // The line numbers are a short array of 2-tuples [start_pc, line_number].
680 // Not necessarily sorted and not necessarily one-to-one.
681 CompressedLineNumberReadStream stream(compressed_linenumber_table());
682 while (stream.read_pair()) {
683 if (stream.bci() == bci) {
684 // perfect match
685 return stream.line();
686 } else {
687 // update best_bci/line
688 if (stream.bci() < bci && stream.bci() >= best_bci) {
689 best_bci = stream.bci();
690 best_line = stream.line();
691 }
692 }
693 }
694 }
695 return best_line;
696 }
697
698
699 bool Method::is_klass_loaded_by_klass_index(int klass_index) const {
700 if( constants()->tag_at(klass_index).is_unresolved_klass() ) {
701 Thread *thread = Thread::current();
702 Symbol* klass_name = constants()->klass_name_at(klass_index);
703 Handle loader(thread, method_holder()->class_loader());
704 Handle prot (thread, method_holder()->protection_domain());
705 return SystemDictionary::find(klass_name, loader, prot, thread) != NULL;
706 } else {
707 return true;
708 }
709 }
710
711
712 bool Method::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
713 int klass_index = constants()->klass_ref_index_at(refinfo_index);
714 if (must_be_resolved) {
715 // Make sure klass is resolved in constantpool.
716 if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
717 }
718 return is_klass_loaded_by_klass_index(klass_index);
719 }
720
721
722 void Method::set_native_function(address function, bool post_event_flag) {
723 assert(function != NULL, "use clear_native_function to unregister natives");
724 assert(!is_method_handle_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), "");
725 address* native_function = native_function_addr();
726
727 // We can see racers trying to place the same native function into place. Once
728 // is plenty.
729 address current = *native_function;
730 if (current == function) return;
731 if (post_event_flag && JvmtiExport::should_post_native_method_bind() &&
732 function != NULL) {
733 // native_method_throw_unsatisfied_link_error_entry() should only
734 // be passed when post_event_flag is false.
735 assert(function !=
736 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
737 "post_event_flag mis-match");
738
739 // post the bind event, and possible change the bind function
740 JvmtiExport::post_native_method_bind(this, &function);
741 }
742 *native_function = function;
743 // This function can be called more than once. We must make sure that we always
744 // use the latest registered method -> check if a stub already has been generated.
745 // If so, we have to make it not_entrant.
746 nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
747 if (nm != NULL) {
748 nm->make_not_entrant();
749 }
750 }
751
752
753 bool Method::has_native_function() const {
754 if (is_method_handle_intrinsic())
755 return false; // special-cased in SharedRuntime::generate_native_wrapper
756 address func = native_function();
757 return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
758 }
759
760
761 void Method::clear_native_function() {
762 // Note: is_method_handle_intrinsic() is allowed here.
763 set_native_function(
764 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
765 !native_bind_event_is_interesting);
766 clear_code();
767 }
768
769 address Method::critical_native_function() {
770 methodHandle mh(this);
771 return NativeLookup::lookup_critical_entry(mh);
772 }
773
774
775 void Method::set_signature_handler(address handler) {
776 address* signature_handler = signature_handler_addr();
777 *signature_handler = handler;
778 }
779
780
781 void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) {
782 if (PrintCompilation && report) {
783 ttyLocker ttyl;
784 tty->print("made not %scompilable on ", is_osr ? "OSR " : "");
785 if (comp_level == CompLevel_all) {
786 tty->print("all levels ");
787 } else {
788 tty->print("levels ");
789 for (int i = (int)CompLevel_none; i <= comp_level; i++) {
790 tty->print("%d ", i);
791 }
792 }
793 this->print_short_name(tty);
794 int size = this->code_size();
795 if (size > 0) {
796 tty->print(" (%d bytes)", size);
797 }
798 if (reason != NULL) {
799 tty->print(" %s", reason);
800 }
801 tty->cr();
802 }
803 if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
804 ttyLocker ttyl;
805 xtty->begin_elem("make_not_compilable thread='" UINTX_FORMAT "' osr='%d' level='%d'",
806 os::current_thread_id(), is_osr, comp_level);
807 if (reason != NULL) {
808 xtty->print(" reason=\'%s\'", reason);
809 }
810 xtty->method(this);
811 xtty->stamp();
812 xtty->end_elem();
813 }
814 }
815
816 bool Method::is_always_compilable() const {
817 // Generated adapters must be compiled
818 if (is_method_handle_intrinsic() && is_synthetic()) {
819 assert(!is_not_c1_compilable(), "sanity check");
820 assert(!is_not_c2_compilable(), "sanity check");
821 return true;
822 }
823
824 return false;
825 }
826
827 bool Method::is_not_compilable(int comp_level) const {
828 if (number_of_breakpoints() > 0)
829 return true;
830 if (is_always_compilable())
831 return false;
832 if (comp_level == CompLevel_any)
833 return is_not_c1_compilable() || is_not_c2_compilable();
834 if (is_c1_compile(comp_level))
835 return is_not_c1_compilable();
836 if (is_c2_compile(comp_level))
837 return is_not_c2_compilable();
838 return false;
839 }
840
841 // call this when compiler finds that this method is not compilable
842 void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
843 if (is_always_compilable()) {
844 // Don't mark a method which should be always compilable
845 return;
846 }
847 print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
848 if (comp_level == CompLevel_all) {
849 set_not_c1_compilable();
850 set_not_c2_compilable();
851 } else {
852 if (is_c1_compile(comp_level))
853 set_not_c1_compilable();
854 if (is_c2_compile(comp_level))
855 set_not_c2_compilable();
856 }
857 CompilationPolicy::policy()->disable_compilation(this);
858 assert(!CompilationPolicy::can_be_compiled(this, comp_level), "sanity check");
859 }
860
861 bool Method::is_not_osr_compilable(int comp_level) const {
862 if (is_not_compilable(comp_level))
863 return true;
864 if (comp_level == CompLevel_any)
865 return is_not_c1_osr_compilable() || is_not_c2_osr_compilable();
866 if (is_c1_compile(comp_level))
867 return is_not_c1_osr_compilable();
868 if (is_c2_compile(comp_level))
869 return is_not_c2_osr_compilable();
870 return false;
871 }
872
873 void Method::set_not_osr_compilable(int comp_level, bool report, const char* reason) {
874 print_made_not_compilable(comp_level, /*is_osr*/ true, report, reason);
875 if (comp_level == CompLevel_all) {
876 set_not_c1_osr_compilable();
877 set_not_c2_osr_compilable();
878 } else {
879 if (is_c1_compile(comp_level))
880 set_not_c1_osr_compilable();
881 if (is_c2_compile(comp_level))
882 set_not_c2_osr_compilable();
883 }
884 CompilationPolicy::policy()->disable_compilation(this);
885 assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check");
886 }
887
888 // Revert to using the interpreter and clear out the nmethod
889 void Method::clear_code() {
890
891 // this may be NULL if c2i adapters have not been made yet
892 // Only should happen at allocate time.
893 if (_adapter == NULL) {
894 _from_compiled_entry = NULL;
895 } else {
896 _from_compiled_entry = _adapter->get_c2i_entry();
897 }
898 OrderAccess::storestore();
899 _from_interpreted_entry = _i2i_entry;
900 OrderAccess::storestore();
901 _code = NULL;
902 }
903
904 // Called by class data sharing to remove any entry points (which are not shared)
905 void Method::unlink_method() {
906 _code = NULL;
907 _i2i_entry = NULL;
908 _from_interpreted_entry = NULL;
909 if (is_native()) {
910 *native_function_addr() = NULL;
911 set_signature_handler(NULL);
912 }
913 NOT_PRODUCT(set_compiled_invocation_count(0);)
914 _adapter = NULL;
915 _from_compiled_entry = NULL;
916
917 // In case of DumpSharedSpaces, _method_data should always be NULL.
918 //
919 // During runtime (!DumpSharedSpaces), when we are cleaning a
920 // shared class that failed to load, this->link_method() may
921 // have already been called (before an exception happened), so
922 // this->_method_data may not be NULL.
923 assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");
924
925 set_method_data(NULL);
926 clear_method_counters();
927 }
928
929 // Called when the method_holder is getting linked. Setup entrypoints so the method
930 // is ready to be called from interpreter, compiler, and vtables.
931 void Method::link_method(const methodHandle& h_method, TRAPS) {
932 // If the code cache is full, we may reenter this function for the
933 // leftover methods that weren't linked.
934 if (_i2i_entry != NULL) return;
935
936 assert(_adapter == NULL, "init'd to NULL" );
937 assert( _code == NULL, "nothing compiled yet" );
938
939 // Setup interpreter entrypoint
940 assert(this == h_method(), "wrong h_method()" );
941 address entry = Interpreter::entry_for_method(h_method);
942 assert(entry != NULL, "interpreter entry must be non-null");
943 // Sets both _i2i_entry and _from_interpreted_entry
944 set_interpreter_entry(entry);
945
946 // Don't overwrite already registered native entries.
947 if (is_native() && !has_native_function()) {
948 set_native_function(
949 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
950 !native_bind_event_is_interesting);
951 }
952
953 // Setup compiler entrypoint. This is made eagerly, so we do not need
954 // special handling of vtables. An alternative is to make adapters more
955 // lazily by calling make_adapter() from from_compiled_entry() for the
956 // normal calls. For vtable calls life gets more complicated. When a
957 // call-site goes mega-morphic we need adapters in all methods which can be
958 // called from the vtable. We need adapters on such methods that get loaded
959 // later. Ditto for mega-morphic itable calls. If this proves to be a
960 // problem we'll make these lazily later.
961 (void) make_adapters(h_method, CHECK);
962
963 // ONLY USE the h_method now as make_adapter may have blocked
964
965 }
966
967 address Method::make_adapters(methodHandle mh, TRAPS) {
968 // Adapters for compiled code are made eagerly here. They are fairly
969 // small (generally < 100 bytes) and quick to make (and cached and shared)
970 // so making them eagerly shouldn't be too expensive.
971 AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
972 if (adapter == NULL ) {
973 THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for adapters");
974 }
975
976 mh->set_adapter_entry(adapter);
977 mh->_from_compiled_entry = adapter->get_c2i_entry();
978 return adapter->get_c2i_entry();
979 }
980
981 void Method::restore_unshareable_info(TRAPS) {
982 // Since restore_unshareable_info can be called more than once for a method, don't
983 // redo any work. If this field is restored, there is nothing to do.
984 if (_from_compiled_entry == NULL) {
985 // restore method's vtable by calling a virtual function
986 restore_vtable();
987
988 methodHandle mh(THREAD, this);
989 link_method(mh, CHECK);
990 }
991 }
992
993
994 // The verified_code_entry() must be called when a invoke is resolved
995 // on this method.
996
997 // It returns the compiled code entry point, after asserting not null.
998 // This function is called after potential safepoints so that nmethod
999 // or adapter that it points to is still live and valid.
1000 // This function must not hit a safepoint!
1001 address Method::verified_code_entry() {
1002 debug_only(NoSafepointVerifier nsv;)
1003 assert(_from_compiled_entry != NULL, "must be set");
1004 return _from_compiled_entry;
1005 }
1006
1007 // Check that if an nmethod ref exists, it has a backlink to this or no backlink at all
1008 // (could be racing a deopt).
1009 // Not inline to avoid circular ref.
1010 bool Method::check_code() const {
1011 // cached in a register or local. There's a race on the value of the field.
1012 nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
1013 return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
1014 }
1015
1016 // Install compiled code. Instantly it can execute.
1017 void Method::set_code(methodHandle mh, nmethod *code) {
1018 assert( code, "use clear_code to remove code" );
1019 assert( mh->check_code(), "" );
1020
1021 guarantee(mh->adapter() != NULL, "Adapter blob must already exist!");
1022
1023 // These writes must happen in this order, because the interpreter will
1024 // directly jump to from_interpreted_entry which jumps to an i2c adapter
1025 // which jumps to _from_compiled_entry.
1026 mh->_code = code; // Assign before allowing compiled code to exec
1027
1028 int comp_level = code->comp_level();
1029 // In theory there could be a race here. In practice it is unlikely
1030 // and not worth worrying about.
1031 if (comp_level > mh->highest_comp_level()) {
1032 mh->set_highest_comp_level(comp_level);
1033 }
1034
1035 OrderAccess::storestore();
1036 #ifdef SHARK
1037 mh->_from_interpreted_entry = code->insts_begin();
1038 #else //!SHARK
1039 mh->_from_compiled_entry = code->verified_entry_point();
1040 OrderAccess::storestore();
1041 // Instantly compiled code can execute.
1042 if (!mh->is_method_handle_intrinsic())
1043 mh->_from_interpreted_entry = mh->get_i2c_entry();
1044 #endif //!SHARK
1045 }
1046
1047
1048 bool Method::is_overridden_in(Klass* k) const {
1049 InstanceKlass* ik = InstanceKlass::cast(k);
1050
1051 if (ik->is_interface()) return false;
1052
1053 // If method is an interface, we skip it - except if it
1054 // is a miranda method
1055 if (method_holder()->is_interface()) {
1056 // Check that method is not a miranda method
1057 if (ik->lookup_method(name(), signature()) == NULL) {
1058 // No implementation exist - so miranda method
1059 return false;
1060 }
1061 return true;
1062 }
1063
1064 assert(ik->is_subclass_of(method_holder()), "should be subklass");
1065 assert(ik->vtable() != NULL, "vtable should exist");
1066 if (!has_vtable_index()) {
1067 return false;
1068 } else {
1069 Method* vt_m = ik->method_at_vtable(vtable_index());
1070 return vt_m != this;
1071 }
1072 }
1073
1074
1075 // give advice about whether this Method* should be cached or not
1076 bool Method::should_not_be_cached() const {
1077 if (is_old()) {
1078 // This method has been redefined. It is either EMCP or obsolete
1079 // and we don't want to cache it because that would pin the method
1080 // down and prevent it from being collectible if and when it
1081 // finishes executing.
1082 return true;
1083 }
1084
1085 // caching this method should be just fine
1086 return false;
1087 }
1088
1089
1090 /**
1091 * Returns true if this is one of the specially treated methods for
1092 * security related stack walks (like Reflection.getCallerClass).
1093 */
1094 bool Method::is_ignored_by_security_stack_walk() const {
1095 if (intrinsic_id() == vmIntrinsics::_invoke) {
1096 // This is Method.invoke() -- ignore it
1097 return true;
1098 }
1099 if (method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
1100 // This is an auxilary frame -- ignore it
1101 return true;
1102 }
1103 if (is_method_handle_intrinsic() || is_compiled_lambda_form()) {
1104 // This is an internal adapter frame for method handles -- ignore it
1105 return true;
1106 }
1107 return false;
1108 }
1109
1110
1111 // Constant pool structure for invoke methods:
1112 enum {
1113 _imcp_invoke_name = 1, // utf8: 'invokeExact', etc.
1114 _imcp_invoke_signature, // utf8: (variable Symbol*)
1115 _imcp_limit
1116 };
1117
1118 // Test if this method is an MH adapter frame generated by Java code.
1119 // Cf. java/lang/invoke/InvokerBytecodeGenerator
1120 bool Method::is_compiled_lambda_form() const {
1121 return intrinsic_id() == vmIntrinsics::_compiledLambdaForm;
1122 }
1123
1124 // Test if this method is an internal MH primitive method.
1125 bool Method::is_method_handle_intrinsic() const {
1126 vmIntrinsics::ID iid = intrinsic_id();
1127 return (MethodHandles::is_signature_polymorphic(iid) &&
1128 MethodHandles::is_signature_polymorphic_intrinsic(iid));
1129 }
1130
1131 bool Method::has_member_arg() const {
1132 vmIntrinsics::ID iid = intrinsic_id();
1133 return (MethodHandles::is_signature_polymorphic(iid) &&
1134 MethodHandles::has_member_arg(iid));
1135 }
1136
1137 // Make an instance of a signature-polymorphic internal MH primitive.
1138 methodHandle Method::make_method_handle_intrinsic(vmIntrinsics::ID iid,
1139 Symbol* signature,
1140 TRAPS) {
1141 ResourceMark rm;
1142 methodHandle empty;
1143
1144 KlassHandle holder = SystemDictionary::MethodHandle_klass();
1145 Symbol* name = MethodHandles::signature_polymorphic_intrinsic_name(iid);
1146 assert(iid == MethodHandles::signature_polymorphic_name_id(name), "");
1147 if (TraceMethodHandles) {
1148 tty->print_cr("make_method_handle_intrinsic MH.%s%s", name->as_C_string(), signature->as_C_string());
1149 }
1150
1151 // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup)
1152 name->increment_refcount();
1153 signature->increment_refcount();
1154
1155 int cp_length = _imcp_limit;
1156 ClassLoaderData* loader_data = holder->class_loader_data();
1157 constantPoolHandle cp;
1158 {
1159 ConstantPool* cp_oop = ConstantPool::allocate(loader_data, cp_length, CHECK_(empty));
1160 cp = constantPoolHandle(THREAD, cp_oop);
1161 }
1162 cp->set_pool_holder(InstanceKlass::cast(holder()));
1163 cp->symbol_at_put(_imcp_invoke_name, name);
1164 cp->symbol_at_put(_imcp_invoke_signature, signature);
1165 cp->set_has_preresolution();
1166
1167 // decide on access bits: public or not?
1168 int flags_bits = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_FINAL);
1169 bool must_be_static = MethodHandles::is_signature_polymorphic_static(iid);
1170 if (must_be_static) flags_bits |= JVM_ACC_STATIC;
1171 assert((flags_bits & JVM_ACC_PUBLIC) == 0, "do not expose these methods");
1172
1173 methodHandle m;
1174 {
1175 InlineTableSizes sizes;
1176 Method* m_oop = Method::allocate(loader_data, 0,
1177 accessFlags_from(flags_bits), &sizes,
1178 ConstMethod::NORMAL, CHECK_(empty));
1179 m = methodHandle(THREAD, m_oop);
1180 }
1181 m->set_constants(cp());
1182 m->set_name_index(_imcp_invoke_name);
1183 m->set_signature_index(_imcp_invoke_signature);
1184 assert(MethodHandles::is_signature_polymorphic_name(m->name()), "");
1185 assert(m->signature() == signature, "");
1186 #ifdef CC_INTERP
1187 ResultTypeFinder rtf(signature);
1188 m->set_result_index(rtf.type());
1189 #endif
1190 m->compute_size_of_parameters(THREAD);
1191 m->init_intrinsic_id();
1192 assert(m->is_method_handle_intrinsic(), "");
1193 #ifdef ASSERT
1194 if (!MethodHandles::is_signature_polymorphic(m->intrinsic_id())) m->print();
1195 assert(MethodHandles::is_signature_polymorphic(m->intrinsic_id()), "must be an invoker");
1196 assert(m->intrinsic_id() == iid, "correctly predicted iid");
1197 #endif //ASSERT
1198
1199 // Finally, set up its entry points.
1200 assert(m->can_be_statically_bound(), "");
1201 m->set_vtable_index(Method::nonvirtual_vtable_index);
1202 m->link_method(m, CHECK_(empty));
1203
1204 if (TraceMethodHandles && (Verbose || WizardMode))
1205 m->print_on(tty);
1206
1207 return m;
1208 }
1209
1210 Klass* Method::check_non_bcp_klass(Klass* klass) {
1211 if (klass != NULL && klass->class_loader() != NULL) {
1212 if (klass->is_objArray_klass())
1213 klass = ObjArrayKlass::cast(klass)->bottom_klass();
1214 return klass;
1215 }
1216 return NULL;
1217 }
1218
1219
1220 methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
1221 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
1222 // Code below does not work for native methods - they should never get rewritten anyway
1223 assert(!m->is_native(), "cannot rewrite native methods");
1224 // Allocate new Method*
1225 AccessFlags flags = m->access_flags();
1226
1227 ConstMethod* cm = m->constMethod();
1228 int checked_exceptions_len = cm->checked_exceptions_length();
1229 int localvariable_len = cm->localvariable_table_length();
1230 int exception_table_len = cm->exception_table_length();
1231 int method_parameters_len = cm->method_parameters_length();
1232 int method_annotations_len = cm->method_annotations_length();
1233 int parameter_annotations_len = cm->parameter_annotations_length();
1234 int type_annotations_len = cm->type_annotations_length();
1235 int default_annotations_len = cm->default_annotations_length();
1236
1237 InlineTableSizes sizes(
1238 localvariable_len,
1239 new_compressed_linenumber_size,
1240 exception_table_len,
1241 checked_exceptions_len,
1242 method_parameters_len,
1243 cm->generic_signature_index(),
1244 method_annotations_len,
1245 parameter_annotations_len,
1246 type_annotations_len,
1247 default_annotations_len,
1248 0);
1249
1250 ClassLoaderData* loader_data = m->method_holder()->class_loader_data();
1251 Method* newm_oop = Method::allocate(loader_data,
1252 new_code_length,
1253 flags,
1254 &sizes,
1255 m->method_type(),
1256 CHECK_(methodHandle()));
1257 methodHandle newm (THREAD, newm_oop);
1258
1259 // Create a shallow copy of Method part, but be careful to preserve the new ConstMethod*
1260 ConstMethod* newcm = newm->constMethod();
1261 int new_const_method_size = newm->constMethod()->size();
1262
1263 memcpy(newm(), m(), sizeof(Method));
1264
1265 // Create shallow copy of ConstMethod.
1266 memcpy(newcm, m->constMethod(), sizeof(ConstMethod));
1267
1268 // Reset correct method/const method, method size, and parameter info
1269 newm->set_constMethod(newcm);
1270 newm->constMethod()->set_code_size(new_code_length);
1271 newm->constMethod()->set_constMethod_size(new_const_method_size);
1272 assert(newm->code_size() == new_code_length, "check");
1273 assert(newm->method_parameters_length() == method_parameters_len, "check");
1274 assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
1275 assert(newm->exception_table_length() == exception_table_len, "check");
1276 assert(newm->localvariable_table_length() == localvariable_len, "check");
1277 // Copy new byte codes
1278 memcpy(newm->code_base(), new_code, new_code_length);
1279 // Copy line number table
1280 if (new_compressed_linenumber_size > 0) {
1281 memcpy(newm->compressed_linenumber_table(),
1282 new_compressed_linenumber_table,
1283 new_compressed_linenumber_size);
1284 }
1285 // Copy method_parameters
1286 if (method_parameters_len > 0) {
1287 memcpy(newm->method_parameters_start(),
1288 m->method_parameters_start(),
1289 method_parameters_len * sizeof(MethodParametersElement));
1290 }
1291 // Copy checked_exceptions
1292 if (checked_exceptions_len > 0) {
1293 memcpy(newm->checked_exceptions_start(),
1294 m->checked_exceptions_start(),
1295 checked_exceptions_len * sizeof(CheckedExceptionElement));
1296 }
1297 // Copy exception table
1298 if (exception_table_len > 0) {
1299 memcpy(newm->exception_table_start(),
1300 m->exception_table_start(),
1301 exception_table_len * sizeof(ExceptionTableElement));
1302 }
1303 // Copy local variable number table
1304 if (localvariable_len > 0) {
1305 memcpy(newm->localvariable_table_start(),
1306 m->localvariable_table_start(),
1307 localvariable_len * sizeof(LocalVariableTableElement));
1308 }
1309 // Copy stackmap table
1310 if (m->has_stackmap_table()) {
1311 int code_attribute_length = m->stackmap_data()->length();
1312 Array<u1>* stackmap_data =
1313 MetadataFactory::new_array<u1>(loader_data, code_attribute_length, 0, CHECK_NULL);
1314 memcpy((void*)stackmap_data->adr_at(0),
1315 (void*)m->stackmap_data()->adr_at(0), code_attribute_length);
1316 newm->set_stackmap_data(stackmap_data);
1317 }
1318
1319 // copy annotations over to new method
1320 newcm->copy_annotations_from(cm);
1321 return newm;
1322 }
1323
1324 vmSymbols::SID Method::klass_id_for_intrinsics(const Klass* holder) {
1325 // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
1326 // because we are not loading from core libraries
1327 // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
1328 // which does not use the class default class loader so we check for its loader here
1329 const InstanceKlass* ik = InstanceKlass::cast(holder);
1330 if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
1331 return vmSymbols::NO_SID; // regardless of name, no intrinsics here
1332 }
1333
1334 // see if the klass name is well-known:
1335 Symbol* klass_name = ik->name();
1336 return vmSymbols::find_sid(klass_name);
1337 }
1338
1339 static bool is_unsafe_alias(vmSymbols::SID name_id) {
1340 // All 70 intrinsic candidate methods from sun.misc.Unsafe in 1.8.
1341 // Some have the same method name but different signature, e.g.
1342 // getByte(long), getByte(Object,long)
1343 switch (name_id) {
1344 case vmSymbols::VM_SYMBOL_ENUM_NAME(allocateInstance_name):
1345 case vmSymbols::VM_SYMBOL_ENUM_NAME(copyMemory_name):
1346 case vmSymbols::VM_SYMBOL_ENUM_NAME(loadFence_name):
1347 case vmSymbols::VM_SYMBOL_ENUM_NAME(storeFence_name):
1348 case vmSymbols::VM_SYMBOL_ENUM_NAME(fullFence_name):
1349 case vmSymbols::VM_SYMBOL_ENUM_NAME(getObject_name):
1350 case vmSymbols::VM_SYMBOL_ENUM_NAME(getBoolean_name):
1351 case vmSymbols::VM_SYMBOL_ENUM_NAME(getByte_name):
1352 case vmSymbols::VM_SYMBOL_ENUM_NAME(getShort_name):
1353 case vmSymbols::VM_SYMBOL_ENUM_NAME(getChar_name):
1354 case vmSymbols::VM_SYMBOL_ENUM_NAME(getInt_name):
1355 case vmSymbols::VM_SYMBOL_ENUM_NAME(getLong_name):
1356 case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloat_name):
1357 case vmSymbols::VM_SYMBOL_ENUM_NAME(getDouble_name):
1358 case vmSymbols::VM_SYMBOL_ENUM_NAME(putObject_name):
1359 case vmSymbols::VM_SYMBOL_ENUM_NAME(putBoolean_name):
1360 case vmSymbols::VM_SYMBOL_ENUM_NAME(putByte_name):
1361 case vmSymbols::VM_SYMBOL_ENUM_NAME(putShort_name):
1362 case vmSymbols::VM_SYMBOL_ENUM_NAME(putChar_name):
1363 case vmSymbols::VM_SYMBOL_ENUM_NAME(putInt_name):
1364 case vmSymbols::VM_SYMBOL_ENUM_NAME(putLong_name):
1365 case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloat_name):
1366 case vmSymbols::VM_SYMBOL_ENUM_NAME(putDouble_name):
1367 case vmSymbols::VM_SYMBOL_ENUM_NAME(getObjectVolatile_name):
1368 case vmSymbols::VM_SYMBOL_ENUM_NAME(getBooleanVolatile_name):
1369 case vmSymbols::VM_SYMBOL_ENUM_NAME(getByteVolatile_name):
1370 case vmSymbols::VM_SYMBOL_ENUM_NAME(getShortVolatile_name):
1371 case vmSymbols::VM_SYMBOL_ENUM_NAME(getCharVolatile_name):
1372 case vmSymbols::VM_SYMBOL_ENUM_NAME(getIntVolatile_name):
1373 case vmSymbols::VM_SYMBOL_ENUM_NAME(getLongVolatile_name):
1374 case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloatVolatile_name):
1375 case vmSymbols::VM_SYMBOL_ENUM_NAME(getDoubleVolatile_name):
1376 case vmSymbols::VM_SYMBOL_ENUM_NAME(putObjectVolatile_name):
1377 case vmSymbols::VM_SYMBOL_ENUM_NAME(putBooleanVolatile_name):
1378 case vmSymbols::VM_SYMBOL_ENUM_NAME(putByteVolatile_name):
1379 case vmSymbols::VM_SYMBOL_ENUM_NAME(putShortVolatile_name):
1380 case vmSymbols::VM_SYMBOL_ENUM_NAME(putCharVolatile_name):
1381 case vmSymbols::VM_SYMBOL_ENUM_NAME(putIntVolatile_name):
1382 case vmSymbols::VM_SYMBOL_ENUM_NAME(putLongVolatile_name):
1383 case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloatVolatile_name):
1384 case vmSymbols::VM_SYMBOL_ENUM_NAME(putDoubleVolatile_name):
1385 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAddress_name):
1386 case vmSymbols::VM_SYMBOL_ENUM_NAME(putAddress_name):
1387 case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapObject_name):
1388 case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapLong_name):
1389 case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapInt_name):
1390 case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedObject_name):
1391 case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedLong_name):
1392 case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedInt_name):
1393 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddInt_name):
1394 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddLong_name):
1395 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetInt_name):
1396 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetLong_name):
1397 case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetObject_name):
1398 case vmSymbols::VM_SYMBOL_ENUM_NAME(park_name):
1399 case vmSymbols::VM_SYMBOL_ENUM_NAME(unpark_name):
1400 return true;
1401 }
1402
1403 return false;
1404 }
1405
1406 void Method::init_intrinsic_id() {
1407 assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
1408 const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
1409 assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
1410 assert(intrinsic_id_size_in_bytes() == sizeof(_intrinsic_id), "");
1411
1412 // the klass name is well-known:
1413 vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
1414 assert(klass_id != vmSymbols::NO_SID, "caller responsibility");
1415
1416 // ditto for method and signature:
1417 vmSymbols::SID name_id = vmSymbols::find_sid(name());
1418 if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
1419 && name_id == vmSymbols::NO_SID)
1420 return;
1421 vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
1422 if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
1423 && sig_id == vmSymbols::NO_SID) return;
1424 jshort flags = access_flags().as_short();
1425
1426 vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
1427 if (id != vmIntrinsics::_none) {
1428 set_intrinsic_id(id);
1429 if (id == vmIntrinsics::_Class_cast) {
1430 // Even if the intrinsic is rejected, we want to inline this simple method.
1431 set_force_inline(true);
1432 }
1433 return;
1434 }
1435
1436 // A few slightly irregular cases:
1437 switch (klass_id) {
1438 case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
1439 // Second chance: check in regular Math.
1440 switch (name_id) {
1441 case vmSymbols::VM_SYMBOL_ENUM_NAME(min_name):
1442 case vmSymbols::VM_SYMBOL_ENUM_NAME(max_name):
1443 case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
1444 // pretend it is the corresponding method in the non-strict class:
1445 klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
1446 id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
1447 break;
1448 }
1449 break;
1450
1451 // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
1452 case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle):
1453 if (!is_native()) break;
1454 id = MethodHandles::signature_polymorphic_name_id(method_holder(), name());
1455 if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
1456 id = vmIntrinsics::_none;
1457 break;
1458
1459 case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Unsafe):
1460 // Map sun.misc.Unsafe to jdk.internal.misc.Unsafe
1461 if (!is_unsafe_alias(name_id)) break;
1462 // pretend it is the corresponding method in the internal Unsafe class:
1463 klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_misc_Unsafe);
1464 id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
1465 break;
1466 }
1467
1468 if (id != vmIntrinsics::_none) {
1469 // Set up its iid. It is an alias method.
1470 set_intrinsic_id(id);
1471 return;
1472 }
1473 }
1474
1475 // These two methods are static since a GC may move the Method
1476 bool Method::load_signature_classes(methodHandle m, TRAPS) {
1477 if (!THREAD->can_call_java()) {
1478 // There is nothing useful this routine can do from within the Compile thread.
1479 // Hopefully, the signature contains only well-known classes.
1480 // We could scan for this and return true/false, but the caller won't care.
1481 return false;
1482 }
1483 bool sig_is_loaded = true;
1484 Handle class_loader(THREAD, m->method_holder()->class_loader());
1485 Handle protection_domain(THREAD, m->method_holder()->protection_domain());
1486 ResourceMark rm(THREAD);
1487 Symbol* signature = m->signature();
1488 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
1489 if (ss.is_object()) {
1490 Symbol* sym = ss.as_symbol(CHECK_(false));
1491 Symbol* name = sym;
1492 Klass* klass = SystemDictionary::resolve_or_null(name, class_loader,
1493 protection_domain, THREAD);
1494 // We are loading classes eagerly. If a ClassNotFoundException or
1495 // a LinkageError was generated, be sure to ignore it.
1496 if (HAS_PENDING_EXCEPTION) {
1497 if (PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass()) ||
1498 PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
1499 CLEAR_PENDING_EXCEPTION;
1500 } else {
1501 return false;
1502 }
1503 }
1504 if( klass == NULL) { sig_is_loaded = false; }
1505 }
1506 }
1507 return sig_is_loaded;
1508 }
1509
1510 bool Method::has_unloaded_classes_in_signature(methodHandle m, TRAPS) {
1511 Handle class_loader(THREAD, m->method_holder()->class_loader());
1512 Handle protection_domain(THREAD, m->method_holder()->protection_domain());
1513 ResourceMark rm(THREAD);
1514 Symbol* signature = m->signature();
1515 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
1516 if (ss.type() == T_OBJECT) {
1517 Symbol* name = ss.as_symbol_or_null();
1518 if (name == NULL) return true;
1519 Klass* klass = SystemDictionary::find(name, class_loader, protection_domain, THREAD);
1520 if (klass == NULL) return true;
1521 }
1522 }
1523 return false;
1524 }
1525
1526 // Exposed so field engineers can debug VM
1527 void Method::print_short_name(outputStream* st) {
1528 ResourceMark rm;
1529 #ifdef PRODUCT
1530 st->print(" %s::", method_holder()->external_name());
1531 #else
1532 st->print(" %s::", method_holder()->internal_name());
1533 #endif
1534 name()->print_symbol_on(st);
1535 if (WizardMode) signature()->print_symbol_on(st);
1536 else if (MethodHandles::is_signature_polymorphic(intrinsic_id()))
1537 MethodHandles::print_as_basic_type_signature_on(st, signature(), true);
1538 }
1539
1540 // Comparer for sorting an object array containing
1541 // Method*s.
1542 static int method_comparator(Method* a, Method* b) {
1543 return a->name()->fast_compare(b->name());
1544 }
1545
1546 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
1547 // default_methods also uses this without the ordering for fast find_method
1548 void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums) {
1549 int length = methods->length();
1550 if (length > 1) {
1551 {
1552 NoSafepointVerifier nsv;
1553 QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
1554 }
1555 // Reset method ordering
1556 if (set_idnums) {
1557 for (int i = 0; i < length; i++) {
1558 Method* m = methods->at(i);
1559 m->set_method_idnum(i);
1560 m->set_orig_method_idnum(i);
1561 }
1562 }
1563 }
1564 }
1565
1566 //-----------------------------------------------------------------------------------
1567 // Non-product code unless JVM/TI needs it
1568
1569 #if !defined(PRODUCT) || INCLUDE_JVMTI
1570 class SignatureTypePrinter : public SignatureTypeNames {
1571 private:
1572 outputStream* _st;
1573 bool _use_separator;
1574
1575 void type_name(const char* name) {
1576 if (_use_separator) _st->print(", ");
1577 _st->print("%s", name);
1578 _use_separator = true;
1579 }
1580
1581 public:
1582 SignatureTypePrinter(Symbol* signature, outputStream* st) : SignatureTypeNames(signature) {
1583 _st = st;
1584 _use_separator = false;
1585 }
1586
1587 void print_parameters() { _use_separator = false; iterate_parameters(); }
1588 void print_returntype() { _use_separator = false; iterate_returntype(); }
1589 };
1590
1591
1592 void Method::print_name(outputStream* st) {
1593 Thread *thread = Thread::current();
1594 ResourceMark rm(thread);
1595 st->print("%s ", is_static() ? "static" : "virtual");
1596 if (WizardMode) {
1597 st->print("%s.", method_holder()->internal_name());
1598 name()->print_symbol_on(st);
1599 signature()->print_symbol_on(st);
1600 } else {
1601 SignatureTypePrinter sig(signature(), st);
1602 sig.print_returntype();
1603 st->print(" %s.", method_holder()->internal_name());
1604 name()->print_symbol_on(st);
1605 st->print("(");
1606 sig.print_parameters();
1607 st->print(")");
1608 }
1609 }
1610 #endif // !PRODUCT || INCLUDE_JVMTI
1611
1612
1613 void Method::print_codes_on(outputStream* st) const {
1614 print_codes_on(0, code_size(), st);
1615 }
1616
1617 void Method::print_codes_on(int from, int to, outputStream* st) const {
1618 Thread *thread = Thread::current();
1619 ResourceMark rm(thread);
1620 methodHandle mh (thread, (Method*)this);
1621 BytecodeStream s(mh);
1622 s.set_interval(from, to);
1623 BytecodeTracer::set_closure(BytecodeTracer::std_closure());
1624 while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
1625 }
1626
1627
1628 // Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas
1629 // between (bci,line) pairs since they are smaller. If (bci delta, line delta) fits in (5-bit unsigned, 3-bit unsigned)
1630 // we save it as one byte, otherwise we write a 0xFF escape character and use regular compression. 0x0 is used
1631 // as end-of-stream terminator.
1632
1633 void CompressedLineNumberWriteStream::write_pair_regular(int bci_delta, int line_delta) {
1634 // bci and line number does not compress into single byte.
1635 // Write out escape character and use regular compression for bci and line number.
1636 write_byte((jubyte)0xFF);
1637 write_signed_int(bci_delta);
1638 write_signed_int(line_delta);
1639 }
1640
1641 // See comment in method.hpp which explains why this exists.
1642 #if defined(_M_AMD64) && _MSC_VER >= 1400
1643 #pragma optimize("", off)
1644 void CompressedLineNumberWriteStream::write_pair(int bci, int line) {
1645 write_pair_inline(bci, line);
1646 }
1647 #pragma optimize("", on)
1648 #endif
1649
1650 CompressedLineNumberReadStream::CompressedLineNumberReadStream(u_char* buffer) : CompressedReadStream(buffer) {
1651 _bci = 0;
1652 _line = 0;
1653 };
1654
1655
1656 bool CompressedLineNumberReadStream::read_pair() {
1657 jubyte next = read_byte();
1658 // Check for terminator
1659 if (next == 0) return false;
1660 if (next == 0xFF) {
1661 // Escape character, regular compression used
1662 _bci += read_signed_int();
1663 _line += read_signed_int();
1664 } else {
1665 // Single byte compression used
1666 _bci += next >> 3;
1667 _line += next & 0x7;
1668 }
1669 return true;
1670 }
1671
1672
1673 Bytecodes::Code Method::orig_bytecode_at(int bci) const {
1674 BreakpointInfo* bp = method_holder()->breakpoints();
1675 for (; bp != NULL; bp = bp->next()) {
1676 if (bp->match(this, bci)) {
1677 return bp->orig_bytecode();
1678 }
1679 }
1680 {
1681 ResourceMark rm;
1682 fatal("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci);
1683 }
1684 return Bytecodes::_shouldnotreachhere;
1685 }
1686
1687 void Method::set_orig_bytecode_at(int bci, Bytecodes::Code code) {
1688 assert(code != Bytecodes::_breakpoint, "cannot patch breakpoints this way");
1689 BreakpointInfo* bp = method_holder()->breakpoints();
1690 for (; bp != NULL; bp = bp->next()) {
1691 if (bp->match(this, bci)) {
1692 bp->set_orig_bytecode(code);
1693 // and continue, in case there is more than one
1694 }
1695 }
1696 }
1697
1698 void Method::set_breakpoint(int bci) {
1699 InstanceKlass* ik = method_holder();
1700 BreakpointInfo *bp = new BreakpointInfo(this, bci);
1701 bp->set_next(ik->breakpoints());
1702 ik->set_breakpoints(bp);
1703 // do this last:
1704 bp->set(this);
1705 }
1706
1707 static void clear_matches(Method* m, int bci) {
1708 InstanceKlass* ik = m->method_holder();
1709 BreakpointInfo* prev_bp = NULL;
1710 BreakpointInfo* next_bp;
1711 for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = next_bp) {
1712 next_bp = bp->next();
1713 // bci value of -1 is used to delete all breakpoints in method m (ex: clear_all_breakpoint).
1714 if (bci >= 0 ? bp->match(m, bci) : bp->match(m)) {
1715 // do this first:
1716 bp->clear(m);
1717 // unhook it
1718 if (prev_bp != NULL)
1719 prev_bp->set_next(next_bp);
1720 else
1721 ik->set_breakpoints(next_bp);
1722 delete bp;
1723 // When class is redefined JVMTI sets breakpoint in all versions of EMCP methods
1724 // at same location. So we have multiple matching (method_index and bci)
1725 // BreakpointInfo nodes in BreakpointInfo list. We should just delete one
1726 // breakpoint for clear_breakpoint request and keep all other method versions
1727 // BreakpointInfo for future clear_breakpoint request.
1728 // bcivalue of -1 is used to clear all breakpoints (see clear_all_breakpoints)
1729 // which is being called when class is unloaded. We delete all the Breakpoint
1730 // information for all versions of method. We may not correctly restore the original
1731 // bytecode in all method versions, but that is ok. Because the class is being unloaded
1732 // so these methods won't be used anymore.
1733 if (bci >= 0) {
1734 break;
1735 }
1736 } else {
1737 // This one is a keeper.
1738 prev_bp = bp;
1739 }
1740 }
1741 }
1742
1743 void Method::clear_breakpoint(int bci) {
1744 assert(bci >= 0, "");
1745 clear_matches(this, bci);
1746 }
1747
1748 void Method::clear_all_breakpoints() {
1749 clear_matches(this, -1);
1750 }
1751
1752
1753 int Method::invocation_count() {
1754 MethodCounters *mcs = method_counters();
1755 if (TieredCompilation) {
1756 MethodData* const mdo = method_data();
1757 if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) ||
1758 ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
1759 return InvocationCounter::count_limit;
1760 } else {
1761 return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) +
1762 ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
1763 }
1764 } else {
1765 return (mcs == NULL) ? 0 : mcs->invocation_counter()->count();
1766 }
1767 }
1768
1769 int Method::backedge_count() {
1770 MethodCounters *mcs = method_counters();
1771 if (TieredCompilation) {
1772 MethodData* const mdo = method_data();
1773 if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) ||
1774 ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
1775 return InvocationCounter::count_limit;
1776 } else {
1777 return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) +
1778 ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
1779 }
1780 } else {
1781 return (mcs == NULL) ? 0 : mcs->backedge_counter()->count();
1782 }
1783 }
1784
1785 int Method::highest_comp_level() const {
1786 const MethodCounters* mcs = method_counters();
1787 if (mcs != NULL) {
1788 return mcs->highest_comp_level();
1789 } else {
1790 return CompLevel_none;
1791 }
1792 }
1793
1794 int Method::highest_osr_comp_level() const {
1795 const MethodCounters* mcs = method_counters();
1796 if (mcs != NULL) {
1797 return mcs->highest_osr_comp_level();
1798 } else {
1799 return CompLevel_none;
1800 }
1801 }
1802
1803 void Method::set_highest_comp_level(int level) {
1804 MethodCounters* mcs = method_counters();
1805 if (mcs != NULL) {
1806 mcs->set_highest_comp_level(level);
1807 }
1808 }
1809
1810 void Method::set_highest_osr_comp_level(int level) {
1811 MethodCounters* mcs = method_counters();
1812 if (mcs != NULL) {
1813 mcs->set_highest_osr_comp_level(level);
1814 }
1815 }
1816
1817 BreakpointInfo::BreakpointInfo(Method* m, int bci) {
1818 _bci = bci;
1819 _name_index = m->name_index();
1820 _signature_index = m->signature_index();
1821 _orig_bytecode = (Bytecodes::Code) *m->bcp_from(_bci);
1822 if (_orig_bytecode == Bytecodes::_breakpoint)
1823 _orig_bytecode = m->orig_bytecode_at(_bci);
1824 _next = NULL;
1825 }
1826
1827 void BreakpointInfo::set(Method* method) {
1828 #ifdef ASSERT
1829 {
1830 Bytecodes::Code code = (Bytecodes::Code) *method->bcp_from(_bci);
1831 if (code == Bytecodes::_breakpoint)
1832 code = method->orig_bytecode_at(_bci);
1833 assert(orig_bytecode() == code, "original bytecode must be the same");
1834 }
1835 #endif
1836 Thread *thread = Thread::current();
1837 *method->bcp_from(_bci) = Bytecodes::_breakpoint;
1838 method->incr_number_of_breakpoints(thread);
1839 SystemDictionary::notice_modification();
1840 {
1841 // Deoptimize all dependents on this method
1842 HandleMark hm(thread);
1843 methodHandle mh(thread, method);
1844 CodeCache::flush_dependents_on_method(mh);
1845 }
1846 }
1847
1848 void BreakpointInfo::clear(Method* method) {
1849 *method->bcp_from(_bci) = orig_bytecode();
1850 assert(method->number_of_breakpoints() > 0, "must not go negative");
1851 method->decr_number_of_breakpoints(Thread::current());
1852 }
1853
1854 // jmethodID handling
1855
1856 // This is a block allocating object, sort of like JNIHandleBlock, only a
1857 // lot simpler.
1858 // It's allocated on the CHeap because once we allocate a jmethodID, we can
1859 // never get rid of it.
1860
1861 static const int min_block_size = 8;
1862
1863 class JNIMethodBlockNode : public CHeapObj<mtClass> {
1864 friend class JNIMethodBlock;
1865 Method** _methods;
1866 int _number_of_methods;
1867 int _top;
1868 JNIMethodBlockNode* _next;
1869
1870 public:
1871
1872 JNIMethodBlockNode(int num_methods = min_block_size);
1873
1874 ~JNIMethodBlockNode() { FREE_C_HEAP_ARRAY(Method*, _methods); }
1875
1876 void ensure_methods(int num_addl_methods) {
1877 if (_top < _number_of_methods) {
1878 num_addl_methods -= _number_of_methods - _top;
1879 if (num_addl_methods <= 0) {
1880 return;
1881 }
1882 }
1883 if (_next == NULL) {
1884 _next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size));
1885 } else {
1886 _next->ensure_methods(num_addl_methods);
1887 }
1888 }
1889 };
1890
1891 class JNIMethodBlock : public CHeapObj<mtClass> {
1892 JNIMethodBlockNode _head;
1893 JNIMethodBlockNode *_last_free;
1894 public:
1895 static Method* const _free_method;
1896
1897 JNIMethodBlock(int initial_capacity = min_block_size)
1898 : _head(initial_capacity), _last_free(&_head) {}
1899
1900 void ensure_methods(int num_addl_methods) {
1901 _last_free->ensure_methods(num_addl_methods);
1902 }
1903
1904 Method** add_method(Method* m) {
1905 for (JNIMethodBlockNode* b = _last_free; b != NULL; b = b->_next) {
1906 if (b->_top < b->_number_of_methods) {
1907 // top points to the next free entry.
1908 int i = b->_top;
1909 b->_methods[i] = m;
1910 b->_top++;
1911 _last_free = b;
1912 return &(b->_methods[i]);
1913 } else if (b->_top == b->_number_of_methods) {
1914 // if the next free entry ran off the block see if there's a free entry
1915 for (int i = 0; i < b->_number_of_methods; i++) {
1916 if (b->_methods[i] == _free_method) {
1917 b->_methods[i] = m;
1918 _last_free = b;
1919 return &(b->_methods[i]);
1920 }
1921 }
1922 // Only check each block once for frees. They're very unlikely.
1923 // Increment top past the end of the block.
1924 b->_top++;
1925 }
1926 // need to allocate a next block.
1927 if (b->_next == NULL) {
1928 b->_next = _last_free = new JNIMethodBlockNode();
1929 }
1930 }
1931 guarantee(false, "Should always allocate a free block");
1932 return NULL;
1933 }
1934
1935 bool contains(Method** m) {
1936 if (m == NULL) return false;
1937 for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
1938 if (b->_methods <= m && m < b->_methods + b->_number_of_methods) {
1939 // This is a bit of extra checking, for two reasons. One is
1940 // that contains() deals with pointers that are passed in by
1941 // JNI code, so making sure that the pointer is aligned
1942 // correctly is valuable. The other is that <= and > are
1943 // technically not defined on pointers, so the if guard can
1944 // pass spuriously; no modern compiler is likely to make that
1945 // a problem, though (and if one did, the guard could also
1946 // fail spuriously, which would be bad).
1947 ptrdiff_t idx = m - b->_methods;
1948 if (b->_methods + idx == m) {
1949 return true;
1950 }
1951 }
1952 }
1953 return false; // not found
1954 }
1955
1956 // Doesn't really destroy it, just marks it as free so it can be reused.
1957 void destroy_method(Method** m) {
1958 #ifdef ASSERT
1959 assert(contains(m), "should be a methodID");
1960 #endif // ASSERT
1961 *m = _free_method;
1962 }
1963
1964 // During class unloading the methods are cleared, which is different
1965 // than freed.
1966 void clear_all_methods() {
1967 for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
1968 for (int i = 0; i< b->_number_of_methods; i++) {
1969 b->_methods[i] = NULL;
1970 }
1971 }
1972 }
1973 #ifndef PRODUCT
1974 int count_methods() {
1975 // count all allocated methods
1976 int count = 0;
1977 for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
1978 for (int i = 0; i< b->_number_of_methods; i++) {
1979 if (b->_methods[i] != _free_method) count++;
1980 }
1981 }
1982 return count;
1983 }
1984 #endif // PRODUCT
1985 };
1986
1987 // Something that can't be mistaken for an address or a markOop
1988 Method* const JNIMethodBlock::_free_method = (Method*)55;
1989
1990 JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _next(NULL), _top(0) {
1991 _number_of_methods = MAX2(num_methods, min_block_size);
1992 _methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal);
1993 for (int i = 0; i < _number_of_methods; i++) {
1994 _methods[i] = JNIMethodBlock::_free_method;
1995 }
1996 }
1997
1998 void Method::ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity) {
1999 ClassLoaderData* cld = loader_data;
2000 if (!SafepointSynchronize::is_at_safepoint()) {
2001 // Have to add jmethod_ids() to class loader data thread-safely.
2002 // Also have to add the method to the list safely, which the cld lock
2003 // protects as well.
2004 MutexLockerEx ml(cld->metaspace_lock(), Mutex::_no_safepoint_check_flag);
2005 if (cld->jmethod_ids() == NULL) {
2006 cld->set_jmethod_ids(new JNIMethodBlock(capacity));
2007 } else {
2008 cld->jmethod_ids()->ensure_methods(capacity);
2009 }
2010 } else {
2011 // At safepoint, we are single threaded and can set this.
2012 if (cld->jmethod_ids() == NULL) {
2013 cld->set_jmethod_ids(new JNIMethodBlock(capacity));
2014 } else {
2015 cld->jmethod_ids()->ensure_methods(capacity);
2016 }
2017 }
2018 }
2019
2020 // Add a method id to the jmethod_ids
2021 jmethodID Method::make_jmethod_id(ClassLoaderData* loader_data, Method* m) {
2022 ClassLoaderData* cld = loader_data;
2023
2024 if (!SafepointSynchronize::is_at_safepoint()) {
2025 // Have to add jmethod_ids() to class loader data thread-safely.
2026 // Also have to add the method to the list safely, which the cld lock
2027 // protects as well.
2028 MutexLockerEx ml(cld->metaspace_lock(), Mutex::_no_safepoint_check_flag);
2029 if (cld->jmethod_ids() == NULL) {
2030 cld->set_jmethod_ids(new JNIMethodBlock());
2031 }
2032 // jmethodID is a pointer to Method*
2033 return (jmethodID)cld->jmethod_ids()->add_method(m);
2034 } else {
2035 // At safepoint, we are single threaded and can set this.
2036 if (cld->jmethod_ids() == NULL) {
2037 cld->set_jmethod_ids(new JNIMethodBlock());
2038 }
2039 // jmethodID is a pointer to Method*
2040 return (jmethodID)cld->jmethod_ids()->add_method(m);
2041 }
2042 }
2043
2044 // Mark a jmethodID as free. This is called when there is a data race in
2045 // InstanceKlass while creating the jmethodID cache.
2046 void Method::destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID m) {
2047 ClassLoaderData* cld = loader_data;
2048 Method** ptr = (Method**)m;
2049 assert(cld->jmethod_ids() != NULL, "should have method handles");
2050 cld->jmethod_ids()->destroy_method(ptr);
2051 }
2052
2053 void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method) {
2054 // Can't assert the method_holder is the same because the new method has the
2055 // scratch method holder.
2056 assert(resolve_jmethod_id(jmid)->method_holder()->class_loader()
2057 == new_method->method_holder()->class_loader(),
2058 "changing to a different class loader");
2059 // Just change the method in place, jmethodID pointer doesn't change.
2060 *((Method**)jmid) = new_method;
2061 }
2062
2063 bool Method::is_method_id(jmethodID mid) {
2064 Method* m = resolve_jmethod_id(mid);
2065 assert(m != NULL, "should be called with non-null method");
2066 InstanceKlass* ik = m->method_holder();
2067 ClassLoaderData* cld = ik->class_loader_data();
2068 if (cld->jmethod_ids() == NULL) return false;
2069 return (cld->jmethod_ids()->contains((Method**)mid));
2070 }
2071
2072 Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
2073 if (mid == NULL) return NULL;
2074 Method* o = resolve_jmethod_id(mid);
2075 if (o == NULL || o == JNIMethodBlock::_free_method || !((Metadata*)o)->is_method()) {
2076 return NULL;
2077 }
2078 return o;
2079 };
2080
2081 void Method::set_on_stack(const bool value) {
2082 // Set both the method itself and its constant pool. The constant pool
2083 // on stack means some method referring to it is also on the stack.
2084 constants()->set_on_stack(value);
2085
2086 bool already_set = on_stack();
2087 _access_flags.set_on_stack(value);
2088 if (value && !already_set) {
2089 MetadataOnStackMark::record(this);
2090 }
2091 }
2092
2093 // Called when the class loader is unloaded to make all methods weak.
2094 void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
2095 loader_data->jmethod_ids()->clear_all_methods();
2096 }
2097
2098 bool Method::has_method_vptr(const void* ptr) {
2099 Method m;
2100 // This assumes that the vtbl pointer is the first word of a C++ object.
2101 // This assumption is also in universe.cpp patch_klass_vtble
2102 void* vtbl2 = dereference_vptr((const void*)&m);
2103 void* this_vtbl = dereference_vptr(ptr);
2104 return vtbl2 == this_vtbl;
2105 }
2106
2107 // Check that this pointer is valid by checking that the vtbl pointer matches
2108 bool Method::is_valid_method() const {
2109 if (this == NULL) {
2110 return false;
2111 } else if (!is_metaspace_object()) {
2112 return false;
2113 } else {
2114 return has_method_vptr((const void*)this);
2115 }
2116 }
2117
2118 #ifndef PRODUCT
2119 void Method::print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) {
2120 out->print_cr("jni_method_id count = %d", loader_data->jmethod_ids()->count_methods());
2121 }
2122 #endif // PRODUCT
2123
2124
2125 // Printing
2126
2127 #ifndef PRODUCT
2128
2129 void Method::print_on(outputStream* st) const {
2130 ResourceMark rm;
2131 assert(is_method(), "must be method");
2132 st->print_cr("%s", internal_name());
2133 // get the effect of PrintOopAddress, always, for methods:
2134 st->print_cr(" - this oop: " INTPTR_FORMAT, p2i(this));
2135 st->print (" - method holder: "); method_holder()->print_value_on(st); st->cr();
2136 st->print (" - constants: " INTPTR_FORMAT " ", p2i(constants()));
2137 constants()->print_value_on(st); st->cr();
2138 st->print (" - access: 0x%x ", access_flags().as_int()); access_flags().print_on(st); st->cr();
2139 st->print (" - name: "); name()->print_value_on(st); st->cr();
2140 st->print (" - signature: "); signature()->print_value_on(st); st->cr();
2141 st->print_cr(" - max stack: %d", max_stack());
2142 st->print_cr(" - max locals: %d", max_locals());
2143 st->print_cr(" - size of params: %d", size_of_parameters());
2144 st->print_cr(" - method size: %d", method_size());
2145 if (intrinsic_id() != vmIntrinsics::_none)
2146 st->print_cr(" - intrinsic id: %d %s", intrinsic_id(), vmIntrinsics::name_at(intrinsic_id()));
2147 if (highest_comp_level() != CompLevel_none)
2148 st->print_cr(" - highest level: %d", highest_comp_level());
2149 st->print_cr(" - vtable index: %d", _vtable_index);
2150 st->print_cr(" - i2i entry: " INTPTR_FORMAT, p2i(interpreter_entry()));
2151 st->print( " - adapters: ");
2152 AdapterHandlerEntry* a = ((Method*)this)->adapter();
2153 if (a == NULL)
2154 st->print_cr(INTPTR_FORMAT, p2i(a));
2155 else
2156 a->print_adapter_on(st);
2157 st->print_cr(" - compiled entry " INTPTR_FORMAT, p2i(from_compiled_entry()));
2158 st->print_cr(" - code size: %d", code_size());
2159 if (code_size() != 0) {
2160 st->print_cr(" - code start: " INTPTR_FORMAT, p2i(code_base()));
2161 st->print_cr(" - code end (excl): " INTPTR_FORMAT, p2i(code_base() + code_size()));
2162 }
2163 if (method_data() != NULL) {
2164 st->print_cr(" - method data: " INTPTR_FORMAT, p2i(method_data()));
2165 }
2166 st->print_cr(" - checked ex length: %d", checked_exceptions_length());
2167 if (checked_exceptions_length() > 0) {
2168 CheckedExceptionElement* table = checked_exceptions_start();
2169 st->print_cr(" - checked ex start: " INTPTR_FORMAT, p2i(table));
2170 if (Verbose) {
2171 for (int i = 0; i < checked_exceptions_length(); i++) {
2172 st->print_cr(" - throws %s", constants()->printable_name_at(table[i].class_cp_index));
2173 }
2174 }
2175 }
2176 if (has_linenumber_table()) {
2177 u_char* table = compressed_linenumber_table();
2178 st->print_cr(" - linenumber start: " INTPTR_FORMAT, p2i(table));
2179 if (Verbose) {
2180 CompressedLineNumberReadStream stream(table);
2181 while (stream.read_pair()) {
2182 st->print_cr(" - line %d: %d", stream.line(), stream.bci());
2183 }
2184 }
2185 }
2186 st->print_cr(" - localvar length: %d", localvariable_table_length());
2187 if (localvariable_table_length() > 0) {
2188 LocalVariableTableElement* table = localvariable_table_start();
2189 st->print_cr(" - localvar start: " INTPTR_FORMAT, p2i(table));
2190 if (Verbose) {
2191 for (int i = 0; i < localvariable_table_length(); i++) {
2192 int bci = table[i].start_bci;
2193 int len = table[i].length;
2194 const char* name = constants()->printable_name_at(table[i].name_cp_index);
2195 const char* desc = constants()->printable_name_at(table[i].descriptor_cp_index);
2196 int slot = table[i].slot;
2197 st->print_cr(" - %s %s bci=%d len=%d slot=%d", desc, name, bci, len, slot);
2198 }
2199 }
2200 }
2201 if (code() != NULL) {
2202 st->print (" - compiled code: ");
2203 code()->print_value_on(st);
2204 }
2205 if (is_native()) {
2206 st->print_cr(" - native function: " INTPTR_FORMAT, p2i(native_function()));
2207 st->print_cr(" - signature handler: " INTPTR_FORMAT, p2i(signature_handler()));
2208 }
2209 }
2210
2211 void Method::print_linkage_flags(outputStream* st) {
2212 access_flags().print_on(st);
2213 if (is_default_method()) {
2214 st->print("default ");
2215 }
2216 if (is_overpass()) {
2217 st->print("overpass ");
2218 }
2219 }
2220 #endif //PRODUCT
2221
2222 void Method::print_value_on(outputStream* st) const {
2223 assert(is_method(), "must be method");
2224 st->print("%s", internal_name());
2225 print_address_on(st);
2226 st->print(" ");
2227 name()->print_value_on(st);
2228 st->print(" ");
2229 signature()->print_value_on(st);
2230 st->print(" in ");
2231 method_holder()->print_value_on(st);
2232 if (WizardMode) st->print("#%d", _vtable_index);
2233 if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
2234 if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
2235 }
2236
2237 #if INCLUDE_SERVICES
2238 // Size Statistics
2239 void Method::collect_statistics(KlassSizeStats *sz) const {
2240 int mysize = sz->count(this);
2241 sz->_method_bytes += mysize;
2242 sz->_method_all_bytes += mysize;
2243 sz->_rw_bytes += mysize;
2244
2245 if (constMethod()) {
2246 constMethod()->collect_statistics(sz);
2247 }
2248 if (method_data()) {
2249 method_data()->collect_statistics(sz);
2250 }
2251 }
2252 #endif // INCLUDE_SERVICES
2253
2254 // LogTouchedMethods and PrintTouchedMethods
2255
2256 // TouchedMethodRecord -- we can't use a HashtableEntry<Method*> because
2257 // the Method may be garbage collected. Let's roll our own hash table.
2258 class TouchedMethodRecord : CHeapObj<mtTracing> {
2259 public:
2260 // It's OK to store Symbols here because they will NOT be GC'ed if
2261 // LogTouchedMethods is enabled.
2262 TouchedMethodRecord* _next;
2263 Symbol* _class_name;
2264 Symbol* _method_name;
2265 Symbol* _method_signature;
2266 };
2267
2268 static const int TOUCHED_METHOD_TABLE_SIZE = 20011;
2269 static TouchedMethodRecord** _touched_method_table = NULL;
2270
2271 void Method::log_touched(TRAPS) {
2272
2273 const int table_size = TOUCHED_METHOD_TABLE_SIZE;
2274 Symbol* my_class = klass_name();
2275 Symbol* my_name = name();
2276 Symbol* my_sig = signature();
2277
2278 unsigned int hash = my_class->identity_hash() +
2279 my_name->identity_hash() +
2280 my_sig->identity_hash();
2281 juint index = juint(hash) % table_size;
2282
2283 MutexLocker ml(TouchedMethodLog_lock, THREAD);
2284 if (_touched_method_table == NULL) {
2285 _touched_method_table = NEW_C_HEAP_ARRAY2(TouchedMethodRecord*, table_size,
2286 mtTracing, CURRENT_PC);
2287 memset(_touched_method_table, 0, sizeof(TouchedMethodRecord*)*table_size);
2288 }
2289
2290 TouchedMethodRecord* ptr = _touched_method_table[index];
2291 while (ptr) {
2292 if (ptr->_class_name == my_class &&
2293 ptr->_method_name == my_name &&
2294 ptr->_method_signature == my_sig) {
2295 return;
2296 }
2297 if (ptr->_next == NULL) break;
2298 ptr = ptr->_next;
2299 }
2300 TouchedMethodRecord* nptr = NEW_C_HEAP_OBJ(TouchedMethodRecord, mtTracing);
2301 my_class->set_permanent(); // prevent reclaimed by GC
2302 my_name->set_permanent();
2303 my_sig->set_permanent();
2304 nptr->_class_name = my_class;
2305 nptr->_method_name = my_name;
2306 nptr->_method_signature = my_sig;
2307 nptr->_next = NULL;
2308
2309 if (ptr == NULL) {
2310 // first
2311 _touched_method_table[index] = nptr;
2312 } else {
2313 ptr->_next = nptr;
2314 }
2315 }
2316
2317 void Method::print_touched_methods(outputStream* out) {
2318 MutexLockerEx ml(Thread::current()->is_VM_thread() ? NULL : TouchedMethodLog_lock);
2319 out->print_cr("# Method::print_touched_methods version 1");
2320 if (_touched_method_table) {
2321 for (int i = 0; i < TOUCHED_METHOD_TABLE_SIZE; i++) {
2322 TouchedMethodRecord* ptr = _touched_method_table[i];
2323 while(ptr) {
2324 ptr->_class_name->print_symbol_on(out); out->print(".");
2325 ptr->_method_name->print_symbol_on(out); out->print(":");
2326 ptr->_method_signature->print_symbol_on(out); out->cr();
2327 ptr = ptr->_next;
2328 }
2329 }
2330 }
2331 }
2332
2333 // Verification
2334
2335 void Method::verify_on(outputStream* st) {
2336 guarantee(is_method(), "object must be method");
2337 guarantee(constants()->is_constantPool(), "should be constant pool");
2338 guarantee(constMethod()->is_constMethod(), "should be ConstMethod*");
2339 MethodData* md = method_data();
2340 guarantee(md == NULL ||
2341 md->is_methodData(), "should be method data");
2342 }
--- EOF ---