1 /* 2 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "compiler/compilerOracle.hpp" 28 #include "interpreter/bytecode.hpp" 29 #include "interpreter/bytecodeStream.hpp" 30 #include "interpreter/linkResolver.hpp" 31 #include "memory/heapInspection.hpp" 32 #include "memory/metaspaceClosure.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "oops/methodData.hpp" 35 #include "prims/jvmtiRedefineClasses.hpp" 36 #include "runtime/arguments.hpp" 37 #include "runtime/compilationPolicy.hpp" 38 #include "runtime/deoptimization.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/orderAccess.inline.hpp" 41 #include "utilities/align.hpp" 42 #include "utilities/copy.hpp" 43 44 // ================================================================== 45 // DataLayout 46 // 47 // Overlay for generic profiling data. 

// Some types of data layouts need a length field.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}

// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Zero the whole header word first, then fill in tag and bci, so the
  // layout never carries stale header bits from previous contents.
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  // Zero every payload cell before any tag-specific setup below.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Call/virtual-call type entries need extra per-layout setup beyond
  // zeroed cells; dispatch to the subclass initializers.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

// Drop references to klasses whose class loader is no longer alive.
// Delegates to the tag-specific ProfileData wrapper for this layout.
void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(cl);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
86 ProfileData::ProfileData() { 87 _data = NULL; 88 } 89 90 char* ProfileData::print_data_on_helper(const MethodData* md) const { 91 DataLayout* dp = md->extra_data_base(); 92 DataLayout* end = md->args_data_limit(); 93 stringStream ss; 94 for (;; dp = MethodData::next_extra(dp)) { 95 assert(dp < end, "moved past end of extra data"); 96 switch(dp->tag()) { 97 case DataLayout::speculative_trap_data_tag: 98 if (dp->bci() == bci()) { 99 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 100 int trap = data->trap_state(); 101 char buf[100]; 102 ss.print("trap/"); 103 data->method()->print_short_name(&ss); 104 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 105 } 106 break; 107 case DataLayout::bit_data_tag: 108 break; 109 case DataLayout::no_tag: 110 case DataLayout::arg_info_data_tag: 111 return ss.as_string(); 112 break; 113 default: 114 fatal("unexpected tag %d", dp->tag()); 115 } 116 } 117 return NULL; 118 } 119 120 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const { 121 print_data_on(st, print_data_on_helper(md)); 122 } 123 124 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const { 125 st->print("bci: %d", bci()); 126 st->fill_to(tab_width_one); 127 st->print("%s", name); 128 tab(st); 129 int trap = trap_state(); 130 if (trap != 0) { 131 char buf[100]; 132 st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 133 } 134 if (extra != NULL) { 135 st->print("%s", extra); 136 } 137 int flags = data()->flags(); 138 if (flags != 0) { 139 st->print("flags(%d) ", flags); 140 } 141 } 142 143 void ProfileData::tab(outputStream* st, bool first) const { 144 st->fill_to(first ? tab_width_one : tab_width_two); 145 } 146 147 // ================================================================== 148 // BitData 149 // 150 // A BitData corresponds to a one-bit flag. This is used to indicate 151 // whether a checkcast bytecode has seen a null value. 


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.

void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  // Wide gotos/jsrs carry a 4-byte offset; use the wide destination.
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w();
  } else {
    target = stream->dest();
  }
  // Cache the displacement from this entry to the target bci's entry.
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

// Number of cells needed to profile the reference parameters of the given
// signature (optionally counting the receiver), capped at max entries.
int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling include the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  SignatureStream ss(signature);
  args_count += ss.reference_parameter_count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}

// Total cells (header + argument types + return type) needed to profile
// the invoke bytecode at the stream's current position.  Returns 0 when
// neither arguments nor return are profiled at this call site.
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

// Walks a method signature and records the stack-slot offset of each
// reference (object/array) parameter, up to a fixed maximum number.
class ArgumentOffsetComputer : public SignatureInfo {
private:
  int _max;
  GrowableArray<int> _offsets;

  void set(int size, BasicType type) { _size += size; }
  void do_object(int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_object(begin, end);
  }
  void do_array (int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_array(begin, end);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  }

  // Iterate the parameters (lazily, once) and return their total slot size.
  int total() { lazy_iterate_parameters(); return _size; }

  // Stack-slot offset of the i-th recorded reference parameter.
  int off_at(int i) const { return _offsets.at(i); }
};

void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  aos.total();
  for (int i = start; i < _number_of_entries; i++) {
    // Offsets from the signature are shifted by 1 when a receiver
    // occupies slot 0.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  SignatureStream ss(inv.signature());
  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    SignatureStream ss(inv.signature());
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// True if the klass encoded in profile word p is non-NULL and its class
// loader is still alive according to the closure.
bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
  Klass* k = (Klass*)klass_part(p);
  return k != NULL && k->is_loader_alive(is_alive_cl);
}

void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    if (!is_loader_alive(is_alive_cl, p)) {
      // Clear the klass but keep the status bits of the old word.
      set_type(i, with_status((Klass*)NULL, p));
    }
  }
}

void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  intptr_t p = type();
  if (!is_loader_alive(is_alive_cl, p)) {
    set_type(with_status((Klass*)NULL, p));
  }
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != NULL && !p->is_loader_alive(is_alive_cl)) {
      clear_row(row);
    }
  }
}

#if INCLUDE_JVMCI
void VirtualCallData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  ReceiverTypeData::clean_weak_klass_links(is_alive_cl);
  // Also clear method rows whose holder's class loader has died.
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && !p->method_holder()->is_loader_alive(is_alive_cl)) {
      clear_method_row(row);
    }
  }
}

void VirtualCallData::clean_weak_method_links() {
  ReceiverTypeData::clean_weak_method_links();
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && !p->on_stack()) {
      clear_method_row(row);
    }
  }
}
#endif // INCLUDE_JVMCI

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  // Count occupied rows first so the header line can report them.
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) entries++;
  }
#if INCLUDE_JVMCI
  st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
#else
  st->print_cr("count(%u) entries(%u)", count(), entries);
#endif
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      total += receiver_count(row);
    }
  }
  // Print each receiver with its count and share of the total.
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

#if INCLUDE_JVMCI
void VirtualCallData::print_method_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) entries++;
  }
  tab(st);
  st->print_cr("method_entries(%u)", entries);
  int total = count();
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      total += method_count(row);
    }
  }
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      tab(st);
      method(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
    }
  }
}
#endif // INCLUDE_JVMCI

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
  // NOTE(review): print_method_data_on is only defined under INCLUDE_JVMCI
  // in this file; presumably a stub exists in methodData.hpp for non-JVMCI
  // builds — confirm against the header before touching this call.
  print_method_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

// Mark every cache row empty: displacement -1, bci no_bci.
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, mdp - dp());
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  // Returned even when no free row remains; the caller still gets the mdp.
  return mdp;
}

#ifdef CC_INTERP
// C++ interpreter: map a return bci straight to its data layout.
DataLayout* RetData::advance(MethodData *md, int bci) {
  return (DataLayout*) md->bci_to_dp(bci);
}
#endif // CC_INTERP

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  // Count occupied rows before printing the header line.
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  // Cache the displacement from this entry to the taken target's entry.
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).
// It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.

int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

// Fill in the cached displacement for every case (and the default) of the
// switch at the stream's current position.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print(" 0x%x", arg_modified(i));
  }
  st->cr();
}

// Cells needed for the parameters-type area of the MDO: one slot per
// profiled reference parameter plus one array-length cell, or 0 when
// parameter profiling is off (or there are no reference parameters).
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(m)) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  st->print("parameter types"); // FIXME extra ignored?
  _parameters.print_data_on(st);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

// Allocate a new MethodData in the loader's metaspace, sized for the
// given method's profile.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method(), size, THREAD);
}

// Number of profiling cells a bytecode needs: a fixed count, or
// variable_cell_count when the size depends on the actual bytecode
// operands, or no_profile_data when the bytecode is not profiled.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  if (is_client_compilation_mode_vm()) {
    return no_profile_data;
  }
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    // Resolve the variable-sized cases from the actual bytecode operands
    // and per-call-site profiling decisions.
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note: cell_count might be zero, meaning that there is just
  // a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (is_server_compilation_mode_vm()) {
      return UseTypeSpeculation;
    }
#endif
    // Intentional fall through into default: without COMPILER2, or when not
    // in server compilation mode, speculation is never used.
  default:
    return false;
  }
  return false;
}

// Number of extra DataLayout slots to reserve beyond the per-bytecode
// profile: room for traps at BCIs without an MDP, plus (optionally) room
// for SpeculativeTrapData entries.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count; // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method. Size is in bytes.
908 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) { 909 int data_size = 0; 910 BytecodeStream stream(method); 911 Bytecodes::Code c; 912 int empty_bc_count = 0; // number of bytecodes lacking data 913 bool needs_speculative_traps = false; 914 while ((c = stream.next()) >= 0) { 915 int size_in_bytes = compute_data_size(&stream); 916 data_size += size_in_bytes; 917 if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1; 918 needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c); 919 } 920 int object_size = in_bytes(data_offset()) + data_size; 921 922 // Add some extra DataLayout cells (at least one) to track stray traps. 923 int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps); 924 object_size += extra_data_count * DataLayout::compute_size_in_bytes(0); 925 926 // Add a cell to record information about modified arguments. 927 int arg_size = method->size_of_parameters(); 928 object_size += DataLayout::compute_size_in_bytes(arg_size+1); 929 930 // Reserve room for an area of the MDO dedicated to profiling of 931 // parameters 932 int args_cell = ParametersTypeData::compute_cell_count(method()); 933 if (args_cell > 0) { 934 object_size += DataLayout::compute_size_in_bytes(args_cell); 935 } 936 return object_size; 937 } 938 939 // Compute the size of the MethodData* necessary to store 940 // profiling information about a given method. Size is in words 941 int MethodData::compute_allocation_size_in_words(const methodHandle& method) { 942 int byte_size = compute_allocation_size_in_bytes(method); 943 int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord; 944 return align_metadata_size(word_size); 945 } 946 947 // Initialize an individual data segment. Returns the size of 948 // the segment in bytes. 
// Initialize the profile slot for the bytecode at the stream's current
// position.  Selects the DataLayout tag and cell count appropriate for
// the bytecode, initializes the layout at data_index, and returns the
// size of the initialized segment in bytes (0 if the bytecode gets no
// inline profile data).
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  // C1-only (client compilation) mode does no MDO profiling.
  if (is_client_compilation_mode_vm()) {
    return 0;
  }
  int cell_count = -1;                // -1 means "no profile for this bytecode"
  int tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks: full receiver-type profile when enabled, otherwise
    // just a bit to record that the bytecode was reached.
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    // Upgrade to a CallTypeData entry only when argument/return type
    // profiling actually needs extra cells at this call site.
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    // Same upgrade logic as above, but for virtual dispatch profiles.
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Variable-sized: depends on the number of switch targets.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    break;
  }
  // Cross-check against the static per-bytecode table, except for entries
  // whose size is inherently dynamic (multi-branch, and the type-profile
  // upgrades which depend on the call site).
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, stream->bci(), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
// Returns NULL when the index is outside the data section.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return NULL;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

// Wrap this raw layout in the (resource-allocated) accessor object
// matching its tag.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return NULL;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
// Advance to the entry following 'current'; returns NULL (invalid) past
// the end of the data section.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Reposition the stream on this entry's bytecode before delegating.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  // The parameters area has no bci, so it is post-initialized separately
  // with a NULL stream.
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(NULL, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method, int size, TRAPS)
  : _extra_data_lock(Monitor::leaf, "MDO extra data lock"),
    _parameters_type_data_di(parameters_uninitialized) {
  // Set the method back-pointer.
  _method = method();
  initialize();
}

// Lay out and initialize every section of the MDO:
//   [per-bytecode data][extra (trap) data][arg info][parameter types]
// The computed total must match compute_allocation_size_in_bytes().
void MethodData::initialize() {
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(method());
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data: extra-data allocation
  // (bci_to_extra_data) relies on untouched entries reading as no_tag.
  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserve an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

// Initialize the scalar (non-layout) state of the MDO: counters, masks,
// compilation hints, RTM state and the trap history.
void MethodData::init() {
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // CompileThresholdScaling (possibly per-method via CompilerOracle)
  // scales the notification frequency.
  double scale = 1.0;
  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
  _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
    if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  clear_escape_info();
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));
}

// Get a measure of how much mileage the method has on it.
1266 int MethodData::mileage_of(Method* method) { 1267 int mileage = 0; 1268 if (TieredCompilation) { 1269 mileage = MAX2(method->invocation_count(), method->backedge_count()); 1270 } else { 1271 int iic = method->interpreter_invocation_count(); 1272 if (mileage < iic) mileage = iic; 1273 MethodCounters* mcs = method->method_counters(); 1274 if (mcs != NULL) { 1275 InvocationCounter* ic = mcs->invocation_counter(); 1276 InvocationCounter* bc = mcs->backedge_counter(); 1277 int icval = ic->count(); 1278 if (ic->carry()) icval += CompileThreshold; 1279 if (mileage < icval) mileage = icval; 1280 int bcval = bc->count(); 1281 if (bc->carry()) bcval += CompileThreshold; 1282 if (mileage < bcval) mileage = bcval; 1283 } 1284 } 1285 return mileage; 1286 } 1287 1288 bool MethodData::is_mature() const { 1289 return CompilationPolicy::policy()->is_mature(_method); 1290 } 1291 1292 // Translate a bci to its corresponding data index (di). 1293 address MethodData::bci_to_dp(int bci) { 1294 ResourceMark rm; 1295 ProfileData* data = data_before(bci); 1296 ProfileData* prev = NULL; 1297 for ( ; is_valid(data); data = next_data(data)) { 1298 if (data->bci() >= bci) { 1299 if (data->bci() == bci) set_hint_di(dp_to_di(data->dp())); 1300 else if (prev != NULL) set_hint_di(dp_to_di(prev->dp())); 1301 return data->dp(); 1302 } 1303 prev = data; 1304 } 1305 return (address)limit_data_position(); 1306 } 1307 1308 // Translate a bci to its corresponding data, or NULL. 
// Find the in-line profile entry for bci; fall back to the extra-data
// section (without creating a new entry) when none exists.
ProfileData* MethodData::bci_to_data(int bci) {
  ProfileData* data = data_before(bci);
  for ( ; is_valid(data); data = next_data(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di(data->dp()));
      return data;
    } else if (data->bci() > bci) {
      break;  // entries are bci-ordered; no match possible past this point
    }
  }
  return bci_to_extra_data(bci, NULL, false);
}

// Step to the next entry in the extra-data section.  Only the tags that
// can legally appear there are handled; an untouched (zeroed) entry reads
// as no_tag and is sized like a BitData entry.
DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

// Scan the extra-data section for an entry matching (bci, m).
// On return, 'dp' (in/out) points at the first free entry when no match
// was found, or at 'end' when the section is exhausted.
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      return NULL;  // first free entry; no match
    case DataLayout::arg_info_data_tag:
      dp = end;
      return NULL; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == NULL && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != NULL) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == NULL) {
            assert(concurrent, "impossible because no concurrent allocation");
            return NULL;
          } else if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  // Unreachable: the loop only exits via the returns above.
  return NULL;
}


// Translate a bci to its corresponding extra data, or NULL.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != NULL && m->is_old()) {
    return NULL;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  // First pass: lock-free lookup.
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != NULL) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != NULL || dp >= end) {
      return result;
    }

    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
      return NULL;
    }
    // Build the header in a temporary, then publish it with a single
    // header store so concurrent lock-free readers see a consistent entry.
    DataLayout temp;
    temp.initialize(tag, bci, 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return NULL;
}

// Locate the ArgInfoData entry (kept at the end of the extra-data
// section), or NULL if not present.
ArgInfoData *MethodData::arg_info() {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return NULL;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

// Dump the parameters area, every in-line entry, then the extra-data
// section (trap entries and the trailing ArgInfoData).
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;  // skip free slots
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    if (dp >= end) return;
  }
}

#if INCLUDE_SERVICES
// Size Statistics
void MethodData::collect_statistics(KlassSizeStats *sz) const {
  int n = sz->count(this);
  sz->_method_data_bytes += n;
  sz->_method_all_bytes += n;
  sz->_rw_bytes += n;
}
#endif // INCLUDE_SERVICES

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

// True when jsr292-related call sites (lambda forms, invokedynamic,
// invokehandle) at (m, bci) should be profiled.
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m , bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

// True for invokevirtual of jdk.internal.misc.Unsafe get*/put* methods,
// which benefit from argument type profiling.
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m , bci);
  if (inv.is_invokevirtual() && inv.klass() == vmSymbols::jdk_internal_misc_Unsafe()) {
    ResourceMark rm;
    char* name = inv.name()->as_C_string();
    if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
      return true;
    }
  }
  return false;
}

// TypeProfileLevel encodes three independent digits:
//   ones = arguments, tens = return, hundreds = parameters.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

// Decide whether argument types should be profiled at this call site.
bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

// Decide whether the return type should be profiled at this call site.
bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

// Decide whether incoming parameter types should be profiled for m.
bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

// CDS support: visit the metaspace pointers embedded in this MDO.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}

// Compact the extra-data section: either move the entry at dp left by
// "shift" cells (reset == false), or zero the "shift" cells that end at
// dp (reset == true).  A shift of 0 is a no-op.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Predicate interface used by clean_extra_data()/verify_extra_data_clean()
// to decide whether a SpeculativeTrapData entry's method is still live.
class CleanExtraDataClosure : public StackObj {
public:
  virtual bool is_live(Method* m) = 0;
};

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
private:
  BoolObjectClosure* _is_alive;
public:
  CleanExtraDataKlassClosure(BoolObjectClosure* is_alive) : _is_alive(is_alive) {}
  bool is_live(Method* m) {
    return m->method_holder()->is_loader_alive(_is_alive);
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};


// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

// Drop profile entries whose klasses are no longer alive, then clean
// and verify the extra-data section.
void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(is_alive);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != NULL) {
    parameters->clean_weak_klass_links(is_alive);
  }

  CleanExtraDataKlassClosure cl(is_alive);
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// Drop profile entries that reference redefined (old) methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

#ifdef ASSERT
void MethodData::verify_clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->verify_clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  verify_extra_data_clean(&cl);
}
#endif // ASSERT