1 /* 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "compiler/compilerOracle.hpp" 28 #include "gc/shared/gcLocker.hpp" 29 #include "interpreter/bytecode.hpp" 30 #include "interpreter/bytecodeStream.hpp" 31 #include "interpreter/linkResolver.hpp" 32 #include "memory/heapInspection.hpp" 33 #include "memory/metaspaceClosure.hpp" 34 #include "memory/resourceArea.hpp" 35 #include "oops/methodData.inline.hpp" 36 #include "prims/jvmtiRedefineClasses.hpp" 37 #include "runtime/arguments.hpp" 38 #include "runtime/compilationPolicy.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/orderAccess.inline.hpp" 42 #include "utilities/align.hpp" 43 #include "utilities/copy.hpp" 44 45 // ================================================================== 46 // DataLayout 47 // 48 // Overlay for generic profiling data. 49 50 // Some types of data layouts need a length field. 51 bool DataLayout::needs_array_len(u1 tag) { 52 return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag); 53 } 54 55 // Perform generic initialization of the data. More specific 56 // initialization occurs in overrides of ProfileData::post_initialize. 57 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) { 58 _header._bits = (intptr_t)0; 59 _header._struct._tag = tag; 60 _header._struct._bci = bci; 61 for (int i = 0; i < cell_count; i++) { 62 set_cell_at(i, (intptr_t)0); 63 } 64 if (needs_array_len(tag)) { 65 set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header. 
66 } 67 if (tag == call_type_data_tag) { 68 CallTypeData::initialize(this, cell_count); 69 } else if (tag == virtual_call_type_data_tag) { 70 VirtualCallTypeData::initialize(this, cell_count); 71 } 72 } 73 74 void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) { 75 ResourceMark m; 76 data_in()->clean_weak_klass_links(cl); 77 } 78 79 80 // ================================================================== 81 // ProfileData 82 // 83 // A ProfileData object is created to refer to a section of profiling 84 // data in a structured way. 85 86 // Constructor for invalid ProfileData. 87 ProfileData::ProfileData() { 88 _data = NULL; 89 } 90 91 char* ProfileData::print_data_on_helper(const MethodData* md) const { 92 DataLayout* dp = md->extra_data_base(); 93 DataLayout* end = md->args_data_limit(); 94 stringStream ss; 95 for (;; dp = MethodData::next_extra(dp)) { 96 assert(dp < end, "moved past end of extra data"); 97 switch(dp->tag()) { 98 case DataLayout::speculative_trap_data_tag: 99 if (dp->bci() == bci()) { 100 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 101 int trap = data->trap_state(); 102 char buf[100]; 103 ss.print("trap/"); 104 data->method()->print_short_name(&ss); 105 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 106 } 107 break; 108 case DataLayout::bit_data_tag: 109 break; 110 case DataLayout::no_tag: 111 case DataLayout::arg_info_data_tag: 112 return ss.as_string(); 113 break; 114 default: 115 fatal("unexpected tag %d", dp->tag()); 116 } 117 } 118 return NULL; 119 } 120 121 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const { 122 print_data_on(st, print_data_on_helper(md)); 123 } 124 125 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const { 126 st->print("bci: %d", bci()); 127 st->fill_to(tab_width_one); 128 st->print("%s", name); 129 tab(st); 130 int trap = trap_state(); 131 if (trap != 0) { 132 char buf[100]; 133 st->print("trap(%s) ", 
Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 134 } 135 if (extra != NULL) { 136 st->print("%s", extra); 137 } 138 int flags = data()->flags(); 139 if (flags != 0) { 140 st->print("flags(%d) ", flags); 141 } 142 } 143 144 void ProfileData::tab(outputStream* st, bool first) const { 145 st->fill_to(first ? tab_width_one : tab_width_two); 146 } 147 148 // ================================================================== 149 // BitData 150 // 151 // A BitData corresponds to a one-bit flag. This is used to indicate 152 // whether a checkcast bytecode has seen a null value. 153 154 155 void BitData::print_data_on(outputStream* st, const char* extra) const { 156 print_shared(st, "BitData", extra); 157 st->cr(); 158 } 159 160 // ================================================================== 161 // CounterData 162 // 163 // A CounterData corresponds to a simple counter. 164 165 void CounterData::print_data_on(outputStream* st, const char* extra) const { 166 print_shared(st, "CounterData", extra); 167 st->print_cr("count(%u)", count()); 168 } 169 170 // ================================================================== 171 // JumpData 172 // 173 // A JumpData is used to access profiling information for a direct 174 // branch. It is a counter, used for counting the number of branches, 175 // plus a data displacement, used for realigning the data pointer to 176 // the corresponding target bci. 

// Compute and store the displacement from this data entry to the data
// entry of the branch target, once the bytecode position is known.
void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w(); // wide forms carry a 32-bit branch offset
  } else {
    target = stream->dest();
  }
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

// Number of profiling cells needed for the reference-typed arguments
// of the given signature, capped at 'max' entries.
int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling include the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  SignatureStream ss(signature);
  args_count += ss.reference_parameter_count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}

// Number of cells needed to profile argument and/or return types at
// this invoke site (0 extra if neither is profiled here).
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  // Only reference-returning calls get a return-type entry.
  if (MethodData::profile_return_for_invoke(m, bci) && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

// Walks a method signature and records, for each object/array argument
// (up to a maximum of _max), the stack-slot offset of that argument.
class ArgumentOffsetComputer : public SignatureInfo {
private:
  int _max;                       // cap on recorded entries
  GrowableArray<int> _offsets;    // slot offsets of reference arguments

  void set(int size, BasicType type) { _size += size; }
  void do_object(int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_object(begin, end);
  }
  void do_array (int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_array(begin, end);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  }

  // Forces iteration over the signature; returns total slot size.
  int total() { lazy_iterate_parameters(); return _size; }

  int off_at(int i) const { return _offsets.at(i); }
};

// Record, for each profiled entry, the stack slot the corresponding
// reference argument lives in; types start out as type_none.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);        // receiver is always slot 0
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  aos.total();
  for (int i = start; i < _number_of_entries; i++) {
    // Shift slots by one when a (non-profiled) receiver occupies slot 0.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  SignatureStream ss(inv.signature());
  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // The receiver is not profiled at call-type sites (include_receiver=false).
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    SignatureStream ss(inv.signature());
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// True if the encoded klass pointer refers to a klass whose loader is
// still alive (p also carries status bits; see klass_part).
bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
  Klass* k = (Klass*)klass_part(p);
  return k != NULL && k->is_loader_alive(is_alive_cl);
}

// Null out type entries whose klass became unreachable, preserving the
// status bits via with_status.
void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    if (!is_loader_alive(is_alive_cl, p)) {
      set_type(i, with_status((Klass*)NULL, p));
    }
  }
}

void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  intptr_t p = type();
  if (!is_loader_alive(is_alive_cl, p)) {
    set_type(with_status((Klass*)NULL, p));
  }
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

// Print one encoded type entry: none/unknown/klass name, plus the
// null-seen bit.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

// Clear any row whose receiver klass belongs to a dead class loader.
void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != NULL && !p->is_loader_alive(is_alive_cl)) {
      clear_row(row);
    }
  }
}

#if INCLUDE_JVMCI
// Also clear method rows (JVMCI profiles callee methods, not just
// receiver klasses) whose holder's loader died.
void VirtualCallData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  ReceiverTypeData::clean_weak_klass_links(is_alive_cl);
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && !p->method_holder()->is_loader_alive(is_alive_cl)) {
      clear_method_row(row);
    }
  }
}

void VirtualCallData::clean_weak_method_links() {
  ReceiverTypeData::clean_weak_method_links();
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && !p->on_stack()) {
      clear_method_row(row);
    }
  }
}
#endif // INCLUDE_JVMCI

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  // First pass: count occupied rows for the header line.
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) entries++;
  }
#if INCLUDE_JVMCI
  st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
#else
  st->print_cr("count(%u) entries(%u)", count(), entries);
#endif
  // Second pass: total hit count so each row can be shown as a fraction.
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

#if INCLUDE_JVMCI
void VirtualCallData::print_method_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) entries++;
  }
  tab(st);
  st->print_cr("method_entries(%u)", entries);
  int total = count();
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      total += method_count(row);
    }
  }
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      tab(st);
      method(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
    }
  }
}
#endif // INCLUDE_JVMCI

// NOTE(review): print_method_data_on is only defined under INCLUDE_JVMCI
// above; the unconditional call here presumably relies on a declaration
// (or empty stub) in the header for non-JVMCI builds — confirm.
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
  print_method_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  // Mark every cache row empty (no_bci) with an invalid displacement.
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, mdp - dp());
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci); // publishes the row (release store)
      break;
    }
  }
  return mdp;
}

#ifdef CC_INTERP
// C++ interpreter variant: just map the target bci to its data pointer.
DataLayout* RetData::advance(MethodData *md, int bci) {
  return (DataLayout*) md->bci_to_dp(bci);
}
#endif // CC_INTERP

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  // Count the occupied cache rows for the header line.
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))", bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

// Cache the displacement from this entry to the taken-branch target's entry.
void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacment for each branch target.

int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

// Cache, for every switch case plus the default, the displacement from
// this entry to the corresponding target's data entry.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)", default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)", count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}

// Cells needed to profile this method's reference parameters (0 when
// parameter profiling is off or there are no reference parameters).
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(m)) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  // Parameter profiling includes the receiver for non-static methods.
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  st->print("parameter types"); // FIXME extra ignored?
  _parameters.print_data_on(st);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

// Allocate a MethodData in the loader's metaspace, sized for 'method'.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method(), size, THREAD);
}

// Fixed cell count for the profile record of a bytecode, or
// variable_cell_count / no_profile_data when it is size-dependent /
// unprofiled.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  if (is_client_compilation_mode_vm()) {
    return no_profile_data;   // C1-only mode collects no per-bytecode profile
  }
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Argument/return type profiling makes the size site-dependent.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    // Resolve the site-dependent sizes here, where the stream position
    // (and hence the concrete signature) is known.
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
    // Deliberate fallthrough: without COMPILER2 (or outside server
    // compilation mode) these cases fall into 'default' and return false.
#ifdef COMPILER2
    if (is_server_compilation_mode_vm()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false; // unreachable; keeps some compilers quiet
}

// Number of extra DataLayout slots to reserve for traps (and, when
// requested, SpeculativeTrapData entries) that have no preallocated MDP.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }
  return object_size;
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Initialize the profile entry for the bytecode at the stream's current
// position, choosing the DataLayout tag and cell count from the bytecode
// kind (and from the argument/return type-profiling configuration for
// invokes).  Returns the size of the initialized segment in bytes, or 0
// if this bytecode gets no profile entry.
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  if (is_client_compilation_mode_vm()) {
    // Client (C1-only) configuration: no per-bytecode profile cells.
    return 0;
  }
  int cell_count = -1;
  int tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Subtype checks: record receiver types if cast profiling is on,
    // otherwise just a bit to note the bytecode was reached.
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    // Statically-bound calls: a plain counter, widened to a
    // CallTypeData entry when argument/return types are profiled here.
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    // Dynamically-bound calls: receiver profiling, widened to a
    // VirtualCallTypeData entry when argument/return types are
    // profiled here.
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    // Two-way branches: taken/not-taken counts.
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // N-way branches: cell count depends on the number of switch targets.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode gets no profile entry (cell_count stays -1).
    break;
  }
  // Cross-check against bytecode_cell_count(c); variable-sized entries
  // (multi-branch and the type-profiling call entries) are exempt.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, stream->bci(), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
// Returns NULL when the index is out of bounds.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return NULL;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

// Wrap this raw layout in the resource-allocated ProfileData accessor
// class matching its tag.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return NULL;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
// Return the entry following "current" in the normal data section,
// computed from current's own size (entries are variable-sized).
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Re-position the stream on the bytecode this entry profiles so
    // post_initialize can inspect it.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  // The parameter-profiling area has no bci, so it is handled separately.
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(NULL, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
// Note: 'size' and TRAPS are unused here; allocation size was computed
// by the caller.
MethodData::MethodData(const methodHandle& method, int size, TRAPS)
  : _extra_data_lock(Monitor::leaf, "MDO extra data lock"),
    _parameters_type_data_di(parameters_uninitialized) {
  // Set the method back-pointer.
  _method = method();
  initialize();
}

// Lay out the whole MDO: per-bytecode entries, extra (trap) entries,
// the arg-info cell and the optional parameter-profiling area; then
// record the final object size.
void MethodData::initialize() {
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(method());
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data (zeroed cells read as no_tag).
  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserve an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  // Cross-check the incremental layout against the standalone size
  // computation; they must agree exactly.
  assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

// Initialize counters, masks, flags and trap history to their defaults.
void MethodData::init() {
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask, honoring any
  // CompileThresholdScaling override from the CompilerOracle.
  double scale = 1.0;
  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
  _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
    if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  clear_escape_info();
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));
}

// Get a measure of how much mileage the method has on it.
1267 int MethodData::mileage_of(Method* method) { 1268 int mileage = 0; 1269 if (TieredCompilation) { 1270 mileage = MAX2(method->invocation_count(), method->backedge_count()); 1271 } else { 1272 int iic = method->interpreter_invocation_count(); 1273 if (mileage < iic) mileage = iic; 1274 MethodCounters* mcs = method->method_counters(); 1275 if (mcs != NULL) { 1276 InvocationCounter* ic = mcs->invocation_counter(); 1277 InvocationCounter* bc = mcs->backedge_counter(); 1278 int icval = ic->count(); 1279 if (ic->carry()) icval += CompileThreshold; 1280 if (mileage < icval) mileage = icval; 1281 int bcval = bc->count(); 1282 if (bc->carry()) bcval += CompileThreshold; 1283 if (mileage < bcval) mileage = bcval; 1284 } 1285 } 1286 return mileage; 1287 } 1288 1289 bool MethodData::is_mature() const { 1290 return CompilationPolicy::policy()->is_mature(_method); 1291 } 1292 1293 // Translate a bci to its corresponding data index (di). 1294 address MethodData::bci_to_dp(int bci) { 1295 ResourceMark rm; 1296 ProfileData* data = data_before(bci); 1297 ProfileData* prev = NULL; 1298 for ( ; is_valid(data); data = next_data(data)) { 1299 if (data->bci() >= bci) { 1300 if (data->bci() == bci) set_hint_di(dp_to_di(data->dp())); 1301 else if (prev != NULL) set_hint_di(dp_to_di(prev->dp())); 1302 return data->dp(); 1303 } 1304 prev = data; 1305 } 1306 return (address)limit_data_position(); 1307 } 1308 1309 // Translate a bci to its corresponding data, or NULL. 
// Look up the profile entry for bci in the normal section; fall back to
// the extra data section (read-only) when none exists.
ProfileData* MethodData::bci_to_data(int bci) {
  ProfileData* data = data_before(bci);
  for ( ; is_valid(data); data = next_data(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di(data->dp()));  // cache hit location as a hint
      return data;
    } else if (data->bci() > bci) {
      // Entries are ordered by bci; we have passed the target.
      break;
    }
  }
  return bci_to_extra_data(bci, NULL, false);
}

// Advance to the next entry of the extra data section.  Only BitData
// (one-cell, also used for free no_tag slots) and SpeculativeTrapData
// entries live there.
DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

// Linear scan of the extra data section for an entry matching bci (and,
// for speculative trap entries, the method m).  On return dp points at
// the slot where the scan stopped (first free slot when returning NULL).
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: nothing matched.
      return NULL;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return NULL; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == NULL && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != NULL) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == NULL) {
            assert(concurrent, "impossible because no concurrent allocation");
            return NULL;
          } else if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  // Not reachable: the loop only exits via return.
  return NULL;
}


// Translate a bci to its corresponding extra data, or NULL.
// Optionally allocates a new entry (under _extra_data_lock) when none
// exists and create_if_missing is true.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != NULL && m->is_old()) {
    return NULL;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  // First, a lock-free read-only scan (safe because the structure is
  // monotonic).
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != NULL) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != NULL || dp >= end) {
      return result;
    }

    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
      return NULL;
    }
    // Initialize a temporary header, then publish it into the slot in
    // one store.
    DataLayout temp;
    temp.initialize(tag, bci, 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return NULL;
}

// Find the ArgInfoData entry, which sits at the end of the extra data
// section; NULL if absent.
ArgInfoData *MethodData::arg_info() {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return NULL;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

// Print the parameter area, every normal entry, then the extra data
// section, each entry prefixed with its data index (di).
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    if (dp >= end) return;
  }
}

#if INCLUDE_SERVICES
// Size Statistics
void MethodData::collect_statistics(KlassSizeStats *sz) const {
  int n = sz->count(this);
  sz->_method_data_bytes += n;
  sz->_method_all_bytes += n;
  sz->_rw_bytes += n;
}
#endif // INCLUDE_SERVICES

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

// True when bci in m is a method-handle-related invoke (or m itself is
// a compiled lambda form), which gets JSR-292-only type profiling.
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m , bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

// True when bci in m is an invokevirtual of a jdk.internal.misc.Unsafe
// get*/put* accessor — these benefit from argument type profiling.
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m , bci);
  if (inv.is_invokevirtual() && inv.klass() == vmSymbols::jdk_internal_misc_Unsafe()) {
    ResourceMark rm;
    char* name = inv.name()->as_C_string();
    if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
      return true;
    }
  }
  return false;
}

// TypeProfileLevel encodes three independent digits:
// ones = arguments, tens = return, hundreds = parameters.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

// Should argument types be profiled for the invoke at bci in m?
bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

// Should the return type be profiled for the invoke at bci in m?
bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

// Should incoming parameter types be profiled for method m?
bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

// CDS support: enumerate embedded metaspace pointers.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}

// Compact the extra data section: either move the entry at dp left by
// "shift" cells (reset == false) or zero the "shift" cells just before
// dp (reset == true).
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Liveness predicate used while cleaning extra data entries.
class CleanExtraDataClosure : public StackObj {
public:
  virtual bool is_live(Method* m) = 0;
};

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
private:
  BoolObjectClosure* _is_alive;
public:
  CleanExtraDataKlassClosure(BoolObjectClosure* is_alive) : _is_alive(is_alive) {}
  bool is_live(Method* m) {
    return m->method_holder()->is_loader_alive(_is_alive);
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};


// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of live entries.
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

// Drop weak klass references from every entry (normal, parameter and
// extra sections) whose class loader is no longer alive.
void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(is_alive);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != NULL) {
    parameters->clean_weak_klass_links(is_alive);
  }

  CleanExtraDataKlassClosure cl(is_alive);
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// Drop references to redefined (old) methods from every entry.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

#ifdef ASSERT
// Debug-only check that no stale method links remain after cleaning.
void MethodData::verify_clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->verify_clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  verify_extra_data_clean(&cl);
}
#endif // ASSERT