1 /* 2 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// ==================================================================
// DataLayout
//
// Overlay for generic profiling data.

// Some types of data layouts need a length field: these tags carry a
// variable-length run of cells after the header.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}

// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
// Zeroes the header and every cell, records the tag and bci, and for
// array-style layouts stores the array length in the dedicated cell.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

// Drop embedded Klass* links whose class loaders are no longer alive
// (or unconditionally when always_clean is set).  Dispatches through
// the typed ProfileData wrapper produced by data_in(); the ResourceMark
// bounds that wrapper's resource allocation.
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
  _data = NULL;
}

// Scan the MethodData's extra-data section for SpeculativeTrapData
// entries matching this data's bci and format them into a
// resource-allocated string ("trap/<method>(<trap state>) ").
// Returns the accumulated string (possibly empty) once the end of the
// populated extra data (no_tag or the arg-info record) is reached.
char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:            // end of populated extra data
    case DataLayout::arg_info_data_tag: // arg info is always last
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return NULL;
}

void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

// Print the columns common to every ProfileData kind: bci, type name,
// trap state (if any), caller-supplied extra text, and flags.
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d", bci());
  st->fill_to(tab_width_one);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != NULL) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) ", flags);
  }
}

// Advance the output stream to the first or second print column.
void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}

// ==================================================================
// BitData
//
// A BitData corresponds to a one-bit flag.  This is used to indicate
// whether a checkcast bytecode has seen a null value.


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.

// Cache the displacement from this data entry to the profile entry of
// the branch target, so the interpreter can realign mdp on a taken jump.
void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w();  // wide forms carry a 32-bit branch offset
  } else {
    target = stream->dest();
  }
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

// Number of cells needed to profile the reference-typed arguments of a
// call with the given signature, capped at max entries.
int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling include the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  SignatureStream ss(signature);
  args_count += ss.reference_parameter_count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}

// Total cells needed at an invoke site for argument and/or return type
// profiling; zero when neither applies at this call site.
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

// Walks a signature and records the stack-slot offset of each
// reference-typed parameter (up to _max of them).
class ArgumentOffsetComputer : public SignatureInfo {
private:
  int _max;
  GrowableArray<int> _offsets;

  void set(int size, BasicType type) { _size += size; }
  void do_object(int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_object(begin, end);
  }
  void do_array (int begin, int end) {
    if (_offsets.length() < _max) {
      _offsets.push(_size);
    }
    SignatureInfo::do_array(begin, end);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  }

  // Iterates the parameters (lazily, once) and returns the total slot size.
  int total() { lazy_iterate_parameters(); return _size; }

  int off_at(int i) const { return _offsets.at(i); }
};

// Record the stack slot of each profiled (reference) argument and reset
// each type entry to type_none.  When include_receiver is set (parameter
// profiling) the receiver occupies entry 0 at stack slot 0.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  aos.total();
  for (int i = start; i < _number_of_entries; i++) {
    // +1 skips the receiver slot for instance calls.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  SignatureStream ss(inv.signature());
  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    SignatureStream ss(inv.signature());
    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// Reset type entries whose Klass is dead (or all, when always_clean),
// preserving the status bits of each entry via with_status.
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != NULL && (always_clean || !k->is_loader_alive())) {
      set_type(i, with_status((Klass*)NULL, p));
    }
  }
}

void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != NULL && (always_clean || !k->is_loader_alive())) {
    set_type(with_status((Klass*)NULL, p));
  }
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

// Render an encoded type entry: "none", "unknown", or the Klass name,
// with a "(null seen)" suffix when the null bit is set.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

// Clear receiver rows whose Klass's loader is dead (or all rows when
// always_clean), freeing those slots for future receiver types.
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != NULL && (always_clean || !p->is_loader_alive())) {
      clear_row(row);
    }
  }
}

#if INCLUDE_JVMCI
// JVMCI additionally profiles target methods; clean those rows too.
void VirtualCallData::clean_weak_klass_links(bool always_clean) {
  ReceiverTypeData::clean_weak_klass_links(always_clean);
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && (always_clean || !p->method_holder()->is_loader_alive())) {
      clear_method_row(row);
    }
  }
}

// Drop method rows made obsolete by class redefinition (is_old()).
void VirtualCallData::clean_weak_method_links() {
  ReceiverTypeData::clean_weak_method_links();
  for (uint row = 0; row < method_row_limit(); row++) {
    Method* p = method(row);
    if (p != NULL && p->is_old()) {
      clear_method_row(row);
    }
  }
}
#endif // INCLUDE_JVMCI

// Print the receiver-type rows: overall counts, then each recorded
// receiver with its count and its share of the total.
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL)  entries++;
  }
#if INCLUDE_JVMCI
  st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
#else
  st->print_cr("count(%u) entries(%u)", count(), entries);
#endif
  // total = generic count plus all per-receiver counts; used as the
  // denominator for the percentage column below.
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

#if INCLUDE_JVMCI
// Print the JVMCI-only profiled-method rows, mirroring the receiver rows.
void VirtualCallData::print_method_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL)  entries++;
  }
  tab(st);
  st->print_cr("method_entries(%u)", entries);
  int total = count();
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      total += method_count(row);
    }
  }
  for (row = 0; row < method_row_limit(); row++) {
    if (method(row) != NULL) {
      tab(st);
      method(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
    }
  }
}
#endif // INCLUDE_JVMCI

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
  print_method_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, mdp - dp());
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci)  entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

// Cache the displacement to the taken-branch target's profile entry so
// the interpreter can realign mdp when the branch is taken.
void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacment for each branch target.

// Cells needed for a switch: one (count, displacement) pair per case
// plus one for the default target, plus the array-length cell.
int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

// Cache a displacement for every case target and for the default target,
// for both tableswitch and lookupswitch forms.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}

// Cells needed to profile the parameters of method m: per-parameter
// entries plus one cell for the array length, or zero when parameter
// profiling is off for this method.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(m)) {
    return 0;
  }
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  st->print("parameter types"); // FIXME extra ignored?
  _parameters.print_data_on(st);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

// Allocate a MethodData object in the class loader's metaspace, sized
// for the method's bytecodes.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method(), size, THREAD);
}

// Number of profile cells a given bytecode requires:
// no_profile_data when the bytecode is not profiled, variable_cell_count
// when the size depends on the specific call/switch site.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  if (is_client_compilation_mode_vm()) {
    return no_profile_data;
  }
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (is_server_compilation_mode_vm()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

// Placement-style new: the real allocation size (header + speculation
// payload) is fs_size, not sizeof(FailedSpeculation).
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
// Fails fatally when the list head has the 0x1 poison bit set by
// free_failed_speculations(), i.e. the list was already freed.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != NULL) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != NULL) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != NULL) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}

// Append a copy of the speculation bytes to the singly-linked failed
// speculations list.  Lock-free: walks to the current tail and installs
// the new node with a CAS, retrying from the losing node on contention.
// Returns false only when allocation fails.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != NULL, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
  FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
  if (fs == NULL) {
    // no memory -> ignore failed speculation
    return false;
  }

  guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  do {
    if (*cursor == NULL) {
      FailedSpeculation* old_fs = Atomic::cmpxchg(fs, cursor, (FailedSpeculation*) NULL);
      if (old_fs == NULL) {
        // Successfully appended fs to end of the list
        return true;
      }
      // Lost the race: another thread appended old_fs; continue from it.
      cursor = old_fs->next_adr();
    } else {
      cursor = (*cursor)->next_adr();
    }
  } while (true);
}

// Delete every node on the list, then poison the head pointer so later
// add_failed_speculation calls can detect use-after-free.
void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != NULL, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != NULL) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This is allows for the check
  // in add_failed_speculation against adding to a freed failed
  // speculations list.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

// Number of extra DataLayout slots to reserve beyond the per-bytecode
// profile data, covering stray traps and (when needed) SpeculativeTrapData.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }
  return object_size;
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Initialize the profile entry for the bytecode at the stream's
// current position, choosing the DataLayout tag and cell count that
// matches the bytecode kind. Writes the header into the layout at
// data_index and returns the entry's size in bytes (0 if this
// bytecode gets no profile entry).
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  // Client (C1-only) mode does no MDO-based profiling.
  if (is_client_compilation_mode_vm()) {
    return 0;
  }
  int cell_count = -1;                 // -1 means "no entry for this bytecode"
  int tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks: profile the observed receiver types when enabled,
    // otherwise just a bit recording whether the check ever failed.
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    // Statically-bound calls: counter only, upgraded to a CallTypeData
    // entry when argument/return type profiling applies at this call site.
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // compute_cell_count may still return the plain counter size when
    // there is nothing to profile; pick the tag accordingly.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    // Unconditional jumps: taken-count plus displacement.
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    // Dynamically-bound calls: receiver-type profile, upgraded to
    // VirtualCallTypeData when argument/return profiling applies.
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    // ret needs to record the returned-to bci (jsr/ret pairs).
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    // Two-way branches: taken and not-taken counts.
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Multi-way branches: per-case counts, so the size is data-dependent.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // No profile entry for this bytecode (cell_count stays -1).
    break;
  }
  // Cross-check against bytecode_cell_count(); variable-sized entries
  // (multi-branch and the type-profiling upgrades) are exempt.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, stream->bci(), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
// Returns NULL if data_index is outside the regular data section.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return NULL;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

// Wrap this raw layout in the (resource-allocated) ProfileData
// accessor class matching its tag.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return NULL;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
// Advance to the entry following "current" by adding its size to its
// data index. Returns NULL (via data_at) past the end of the section.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Re-position the stream on this entry's bytecode so the entry
    // can inspect it during its own post_initialize.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  // Parameter profiling has no bci, so it is handled outside the loop.
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(NULL, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method, int size, TRAPS)
  : _extra_data_lock(Mutex::leaf, "MDO extra data lock"),
    _parameters_type_data_di(parameters_uninitialized) {
  // Set the method back-pointer.
  _method = method();
  initialize();
}

// Lay out and initialize all sections of the MDO:
// per-bytecode data, extra (trap) data, arg-info, and parameter data.
// The computed total must match compute_allocation_size_in_bytes().
void MethodData::initialize() {
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(method());
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // With JVMCI only bytecodes that can actually trap count as "empty".
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserve an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

// Reset counters, masks, flags and trap history to their initial state.
void MethodData::init() {
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  // has_option_value leaves scale at 1.0 when the option is absent.
  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
  _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = NULL;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
    if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  clear_escape_info();
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));
}

// Get a measure of how much mileage the method has on it.
1340 int MethodData::mileage_of(Method* method) { 1341 int mileage = 0; 1342 if (TieredCompilation) { 1343 mileage = MAX2(method->invocation_count(), method->backedge_count()); 1344 } else { 1345 int iic = method->interpreter_invocation_count(); 1346 if (mileage < iic) mileage = iic; 1347 MethodCounters* mcs = method->method_counters(); 1348 if (mcs != NULL) { 1349 InvocationCounter* ic = mcs->invocation_counter(); 1350 InvocationCounter* bc = mcs->backedge_counter(); 1351 int icval = ic->count(); 1352 if (ic->carry()) icval += CompileThreshold; 1353 if (mileage < icval) mileage = icval; 1354 int bcval = bc->count(); 1355 if (bc->carry()) bcval += CompileThreshold; 1356 if (mileage < bcval) mileage = bcval; 1357 } 1358 } 1359 return mileage; 1360 } 1361 1362 bool MethodData::is_mature() const { 1363 return CompilationPolicy::policy()->is_mature(_method); 1364 } 1365 1366 // Translate a bci to its corresponding data index (di). 1367 address MethodData::bci_to_dp(int bci) { 1368 ResourceMark rm; 1369 ProfileData* data = data_before(bci); 1370 ProfileData* prev = NULL; 1371 for ( ; is_valid(data); data = next_data(data)) { 1372 if (data->bci() >= bci) { 1373 if (data->bci() == bci) set_hint_di(dp_to_di(data->dp())); 1374 else if (prev != NULL) set_hint_di(dp_to_di(prev->dp())); 1375 return data->dp(); 1376 } 1377 prev = data; 1378 } 1379 return (address)limit_data_position(); 1380 } 1381 1382 // Translate a bci to its corresponding data, or NULL. 
// Translate a bci to its corresponding data, or NULL.
// Searches the regular (sorted-by-bci) data section first, then falls
// back to the extra data section (without creating a new entry).
ProfileData* MethodData::bci_to_data(int bci) {
  ProfileData* data = data_before(bci);
  for ( ; is_valid(data); data = next_data(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di(data->dp()));
      return data;
    } else if (data->bci() > bci) {
      // Entries are bci-ordered; we've gone past any possible match.
      break;
    }
  }
  return bci_to_extra_data(bci, NULL, false);
}

// Advance to the next entry in the extra data section. Only the
// entry kinds that can appear there are handled.
DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    // Unallocated slots are sized like BitData entries.
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

// Scan the extra data section for an entry matching (bci, m). m == NULL
// looks for a BitData trap entry, m != NULL for a SpeculativeTrapData.
// On a miss, dp (in/out) is left at the first free slot (or at the end),
// which the caller may use as the allocation point. "concurrent" is
// true when racing allocators may be active (lock not held).
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: no match exists beyond this point.
      return NULL;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return NULL; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == NULL && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != NULL) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == NULL) {
            assert(concurrent, "impossible because no concurrent allocation");
            return NULL;
          } else if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  // Unreachable: the loop above only exits via return.
  return NULL;
}


// Translate a bci to its corresponding extra data, or NULL.
// Optionally allocates a new entry (BitData when m == NULL, otherwise
// SpeculativeTrapData for method m) if none exists and there is room.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != NULL && m->is_old()) {
    return NULL;
  }

  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();

  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != NULL) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != NULL || dp >= end) {
      return result;
    }

    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
      return NULL;
    }
    // Build the header in a temporary so the slot's tag becomes
    // visible only once fully initialized.
    DataLayout temp;
    temp.initialize(tag, bci, 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return NULL;
}

// Locate the ArgInfoData entry at the end of the extra data section.
ArgInfoData *MethodData::arg_info() {
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return NULL;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

// Print the regular data section, then the extra data section.
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "OrderAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    if (dp >= end) return;
  }
}

#if INCLUDE_SERVICES
// Size Statistics
void MethodData::collect_statistics(KlassSizeStats *sz) const {
  int n = sz->count(this);
  sz->_method_data_bytes += n;
  sz->_method_all_bytes += n;
  sz->_rw_bytes += n;
}
#endif // INCLUDE_SERVICES

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

// True if the call site at (m, bci) is method-handle related
// (invokedynamic/invokehandle) or m itself is a lambda form.
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m , bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

// True if the call site at (m, bci) is an Unsafe get*/put* virtual call.
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m , bci);
  if (inv.is_invokevirtual()) {
    if (inv.klass() == vmSymbols::jdk_internal_misc_Unsafe() ||
        inv.klass() == vmSymbols::sun_misc_Unsafe()) {
      ResourceMark rm;
      char* name = inv.name()->as_C_string();
      if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
        return true;
      }
    }
  }
  return false;
}

// TypeProfileLevel is a three-digit flag: ones digit = arguments,
// tens digit = return, hundreds digit = parameters.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

// Should argument types be profiled at the call site (m, bci)?
bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  // Unsafe accessors are profiled even in jsr292-only mode.
  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

// Should the return type be profiled at the call site (m, bci)?
bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

// Should incoming parameter types be profiled for method m?
bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

// CDS support: expose the Method* back-pointer for relocation.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}

// Compaction helper for the extra data section. With reset == false,
// moves the entry at dp left by "shift" cells; with reset == true,
// zeroes the "shift" cells immediately before dp.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
  bool _always_clean;
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  // Live iff we are not force-cleaning and the holder's loader is alive.
  bool is_live(Method* m) {
    return !(_always_clean) && m->method_holder()->is_loader_alive();
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};


// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

// Drop profile entries whose klasses are unloaded (or all of them when
// always_clean), then purge dead SpeculativeTrapData entries.
void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != NULL) {
    parameters->clean_weak_klass_links(always_clean);
  }

  CleanExtraDataKlassClosure cl(always_clean);
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// This is called during redefinition to clean all "old" redefined
// methods out of MethodData for all methods.
1849 void MethodData::clean_weak_method_links() { 1850 ResourceMark rm; 1851 for (ProfileData* data = first_data(); 1852 is_valid(data); 1853 data = next_data(data)) { 1854 data->clean_weak_method_links(); 1855 } 1856 1857 CleanExtraDataMethodClosure cl; 1858 clean_extra_data(&cl); 1859 verify_extra_data_clean(&cl); 1860 } 1861 1862 #ifdef ASSERT 1863 void MethodData::verify_clean_weak_method_links() { 1864 ResourceMark rm; 1865 for (ProfileData* data = first_data(); 1866 is_valid(data); 1867 data = next_data(data)) { 1868 data->verify_clean_weak_method_links(); 1869 } 1870 1871 CleanExtraDataMethodClosure cl; 1872 verify_extra_data_clean(&cl); 1873 } 1874 #endif // ASSERT