   1 /*
   2  * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compilationPolicy.hpp"
  28 #include "compiler/compilerOracle.hpp"
  29 #include "interpreter/bytecode.hpp"
  30 #include "interpreter/bytecodeStream.hpp"
  31 #include "interpreter/linkResolver.hpp"
  32 #include "memory/metaspaceClosure.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/methodData.inline.hpp"
  35 #include "prims/jvmtiRedefineClasses.hpp"
  36 #include "runtime/arguments.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/deoptimization.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/orderAccess.hpp"
  41 #include "runtime/safepointVerifiers.hpp"
  42 #include "utilities/align.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 // ==================================================================
  46 // DataLayout
  47 //
  48 // Overlay for generic profiling data.
  49 
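// Illustrative sketch of the raw memory this class overlays (field names match
// those used by initialize() below; exact widths and padding are assumptions
// based on the u1/u2 parameter types, not a normative layout):
//
//   header:      [ _tag : u1 | _flags : u1 | _bci : u2 | pad to intptr_t ]
//   cell 0..n-1: intptr_t-sized profiling cells whose meaning depends on _tag
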
  50 // Some types of data layouts need a length field.
  51 bool DataLayout::needs_array_len(u1 tag) {
  52   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  53 }
  54 
  55 // Perform generic initialization of the data.  More specific
  56 // initialization occurs in overrides of ProfileData::post_initialize.
  57 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  58   _header._bits = (intptr_t)0;
  59   _header._struct._tag = tag;
  60   _header._struct._bci = bci;
  61   for (int i = 0; i < cell_count; i++) {
  62     set_cell_at(i, (intptr_t)0);
  63   }
  64   if (needs_array_len(tag)) {
  65     set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  66   }
  67   if (tag == call_type_data_tag) {
  68     CallTypeData::initialize(this, cell_count);
  69   } else if (tag == virtual_call_type_data_tag) {
  70     VirtualCallTypeData::initialize(this, cell_count);
  71   }
  72 }
  73 
  74 void DataLayout::clean_weak_klass_links(bool always_clean) {
  75   ResourceMark m;
  76   data_in()->clean_weak_klass_links(always_clean);
  77 }
  78 
  79 
  80 // ==================================================================
  81 // ProfileData
  82 //
  83 // A ProfileData object is created to refer to a section of profiling
  84 // data in a structured way.
  85 
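// A minimal usage sketch (hypothetical caller; 'mdo' and 'bci' are assumed to
// be a valid MethodData* and a bytecode index within the profiled method):
//
//   ProfileData* data = mdo->bci_to_data(bci);
//   if (data != NULL && data->is_CounterData()) {
//     uint count = data->as_CounterData()->count();
//   }
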
  86 // Constructor for invalid ProfileData.
  87 ProfileData::ProfileData() {
  88   _data = NULL;
  89 }
  90 
  91 char* ProfileData::print_data_on_helper(const MethodData* md) const {
  92   DataLayout* dp  = md->extra_data_base();
  93   DataLayout* end = md->args_data_limit();
  94   stringStream ss;
  95   for (;; dp = MethodData::next_extra(dp)) {
  96     assert(dp < end, "moved past end of extra data");
  97     switch(dp->tag()) {
  98     case DataLayout::speculative_trap_data_tag:
  99       if (dp->bci() == bci()) {
 100         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
 101         int trap = data->trap_state();
 102         char buf[100];
 103         ss.print("trap/");
 104         data->method()->print_short_name(&ss);
 105         ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 106       }
 107       break;
 108     case DataLayout::bit_data_tag:
 109       break;
 110     case DataLayout::no_tag:
 111     case DataLayout::arg_info_data_tag:
 112       return ss.as_string();
 113       break;
 114     default:
 115       fatal("unexpected tag %d", dp->tag());
 116     }
 117   }
 118   return NULL;
 119 }
 120 
 121 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
 122   print_data_on(st, print_data_on_helper(md));
 123 }
 124 
 125 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
 126   st->print("bci: %d", bci());
 127   st->fill_to(tab_width_one);
 128   st->print("%s", name);
 129   tab(st);
 130   int trap = trap_state();
 131   if (trap != 0) {
 132     char buf[100];
 133     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 134   }
 135   if (extra != NULL) {
 136     st->print("%s", extra);
 137   }
 138   int flags = data()->flags();
 139   if (flags != 0) {
 140     st->print("flags(%d) ", flags);
 141   }
 142 }
 143 
 144 void ProfileData::tab(outputStream* st, bool first) const {
 145   st->fill_to(first ? tab_width_one : tab_width_two);
 146 }
 147 
 148 // ==================================================================
 149 // BitData
 150 //
 151 // A BitData corresponds to a one-bit flag.  This is used to indicate
 152 // whether a checkcast bytecode has seen a null value.
 153 
 154 
 155 void BitData::print_data_on(outputStream* st, const char* extra) const {
 156   print_shared(st, "BitData", extra);
 157   st->cr();
 158 }
 159 
 160 // ==================================================================
 161 // CounterData
 162 //
 163 // A CounterData corresponds to a simple counter.
 164 
 165 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 166   print_shared(st, "CounterData", extra);
 167   st->print_cr("count(%u)", count());
 168 }
 169 
 170 // ==================================================================
 171 // JumpData
 172 //
 173 // A JumpData is used to access profiling information for a direct
 174 // branch.  It is a counter, used for counting the number of branches,
 175 // plus a data displacement, used for realigning the data pointer to
 176 // the corresponding target bci.
 177 
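// Worked example (values are illustrative only): if the goto at bci 10 has its
// profile data at data index 24 and the target bci 30 has data at index 40,
// post_initialize() below stores a displacement of 40 - 24 = 16, letting the
// interpreter move its profile data pointer directly to the target's data.
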
 178 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 179   assert(stream->bci() == bci(), "wrong pos");
 180   int target;
 181   Bytecodes::Code c = stream->code();
 182   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 183     target = stream->dest_w();
 184   } else {
 185     target = stream->dest();
 186   }
 187   int my_di = mdo->dp_to_di(dp());
 188   int target_di = mdo->bci_to_di(target);
 189   int offset = target_di - my_di;
 190   set_displacement(offset);
 191 }
 192 
 193 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 194   print_shared(st, "JumpData", extra);
 195   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 196 }
 197 
 198 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 199   // Parameter profiling includes the receiver
 200   int args_count = include_receiver ? 1 : 0;
 201   ResourceMark rm;
 202   ReferenceArgumentCount rac(signature);
 203   args_count += rac.count();
 204   args_count = MIN2(args_count, max);
 205   return args_count * per_arg_cell_count;
 206 }
 207 
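// Worked example for compute_cell_count() above (assuming per_arg_cell_count
// covers one stack-slot cell plus one type cell per profiled argument): for a
// signature (ILjava/lang/String;J)V with include_receiver == false, only the
// String argument is a reference, so args_count == 1 and the entries need
// 1 * per_arg_cell_count cells.
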
 208 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 209   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 210   assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 211   const methodHandle m = stream->method();
 212   int bci = stream->bci();
 213   Bytecode_invoke inv(m, bci);
 214   int args_cell = 0;
 215   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 216     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 217   }
 218   int ret_cell = 0;
 219   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 220     ret_cell = ReturnTypeEntry::static_cell_count();
 221   }
 222   int header_cell = 0;
 223   if (args_cell + ret_cell > 0) {
 224     header_cell = header_cell_count();
 225   }
 226 
 227   return header_cell + args_cell + ret_cell;
 228 }
 229 
 230 class ArgumentOffsetComputer : public SignatureIterator {
 231 private:
 232   int _max;
 233   int _offset;
 234   GrowableArray<int> _offsets;
 235 
 236   friend class SignatureIterator;  // so do_parameters_on can call do_type
 237   void do_type(BasicType type) {
 238     if (is_reference_type(type) && _offsets.length() < _max) {
 239       _offsets.push(_offset);
 240     }
 241     _offset += parameter_type_word_count(type);
 242   }
 243 
 244  public:
 245   ArgumentOffsetComputer(Symbol* signature, int max)
 246     : SignatureIterator(signature),
 247       _max(max), _offset(0),
 248       _offsets(Thread::current(), max) {
 249     do_parameters_on(this);  // non-virtual template execution
 250   }
 251 
 252   int off_at(int i) const { return _offsets.at(i); }
 253 };
 254 
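// Worked example for ArgumentOffsetComputer (illustrative signature): for
// (Ljava/lang/Object;IJLjava/lang/String;)V with max == 2, do_type() records
// offset 0 for the Object and offset 4 for the String (the int occupies one
// word and the long two), so off_at(0) == 0 and off_at(1) == 4.
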
 255 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
 256   ResourceMark rm;
 257   int start = 0;
 258   // Parameter profiling includes the receiver
 259   if (include_receiver && has_receiver) {
 260     set_stack_slot(0, 0);
 261     set_type(0, type_none());
 262     start += 1;
 263   }
 264   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
 265   for (int i = start; i < _number_of_entries; i++) {
 266     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
 267     set_type(i, type_none());
 268   }
 269 }
 270 
 271 void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 272   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 273   Bytecode_invoke inv(stream->method(), stream->bci());
 274 
 275   if (has_arguments()) {
 276 #ifdef ASSERT
 277     ResourceMark rm;
 278     ReferenceArgumentCount rac(inv.signature());
 279     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 280     assert(count > 0, "room for args type but none found?");
 281     check_number_of_arguments(count);
 282 #endif
 283     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 284   }
 285 
 286   if (has_return()) {
 287     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 288     _ret.post_initialize();
 289   }
 290 }
 291 
 292 void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 293   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 294   Bytecode_invoke inv(stream->method(), stream->bci());
 295 
 296   if (has_arguments()) {
 297 #ifdef ASSERT
 298     ResourceMark rm;
 299     ReferenceArgumentCount rac(inv.signature());
 300     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 301     assert(count > 0, "room for args type but none found?");
 302     check_number_of_arguments(count);
 303 #endif
 304     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 305   }
 306 
 307   if (has_return()) {
 308     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 309     _ret.post_initialize();
 310   }
 311 }
 312 
 313 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 314   for (int i = 0; i < _number_of_entries; i++) {
 315     intptr_t p = type(i);
 316     Klass* k = (Klass*)klass_part(p);
 317     if (k != NULL && (always_clean || !k->is_loader_alive())) {
 318       set_type(i, with_status((Klass*)NULL, p));
 319     }
 320   }
 321 }
 322 
 323 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 324   intptr_t p = type();
 325   Klass* k = (Klass*)klass_part(p);
 326   if (k != NULL && (always_clean || !k->is_loader_alive())) {
 327     set_type(with_status((Klass*)NULL, p));
 328   }
 329 }
 330 
 331 bool TypeEntriesAtCall::return_profiling_enabled() {
 332   return MethodData::profile_return();
 333 }
 334 
 335 bool TypeEntriesAtCall::arguments_profiling_enabled() {
 336   return MethodData::profile_arguments();
 337 }
 338 
 339 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 340   if (is_type_none(k)) {
 341     st->print("none");
 342   } else if (is_type_unknown(k)) {
 343     st->print("unknown");
 344   } else {
 345     valid_klass(k)->print_value_on(st);
 346   }
 347   if (was_null_seen(k)) {
 348     st->print(" (null seen)");
 349   }
 350 }
 351 
 352 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 353   for (int i = 0; i < _number_of_entries; i++) {
 354     _pd->tab(st);
 355     st->print("%d: stack(%u) ", i, stack_slot(i));
 356     print_klass(st, type(i));
 357     st->cr();
 358   }
 359 }
 360 
 361 void ReturnTypeEntry::print_data_on(outputStream* st) const {
 362   _pd->tab(st);
 363   print_klass(st, type());
 364   st->cr();
 365 }
 366 
 367 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 368   CounterData::print_data_on(st, extra);
 369   if (has_arguments()) {
 370     tab(st, true);
 371     st->print("argument types");
 372     _args.print_data_on(st);
 373   }
 374   if (has_return()) {
 375     tab(st, true);
 376     st->print("return type");
 377     _ret.print_data_on(st);
 378   }
 379 }
 380 
 381 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 382   VirtualCallData::print_data_on(st, extra);
 383   if (has_arguments()) {
 384     tab(st, true);
 385     st->print("argument types");
 386     _args.print_data_on(st);
 387   }
 388   if (has_return()) {
 389     tab(st, true);
 390     st->print("return type");
 391     _ret.print_data_on(st);
 392   }
 393 }
 394 
 395 // ==================================================================
 396 // ReceiverTypeData
 397 //
 398 // A ReceiverTypeData is used to access profiling information about a
 399 // dynamic type check.  It consists of a counter which counts the total times
 400 // that the check is reached, and a series of (Klass*, count) pairs
 401 // which are used to store a type profile for the receiver of the check.
 402 
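// Illustrative sketch of the row accessors used below (the cell arrangement is
// an assumption; receiver(row) and receiver_count(row) are the real accessors):
//
//   count            total number of times the check was reached
//   row 0..limit-1   (receiver(row) : Klass*, receiver_count(row) : uint)
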
 403 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 404   for (uint row = 0; row < row_limit(); row++) {
 405     Klass* p = receiver(row);
 406     if (p != NULL && (always_clean || !p->is_loader_alive())) {
 407       clear_row(row);
 408     }
 409   }
 410 }
 411 
 412 #if INCLUDE_JVMCI
 413 void VirtualCallData::clean_weak_klass_links(bool always_clean) {
 414   ReceiverTypeData::clean_weak_klass_links(always_clean);
 415   for (uint row = 0; row < method_row_limit(); row++) {
 416     Method* p = method(row);
 417     if (p != NULL && (always_clean || !p->method_holder()->is_loader_alive())) {
 418       clear_method_row(row);
 419     }
 420   }
 421 }
 422 
 423 void VirtualCallData::clean_weak_method_links() {
 424   ReceiverTypeData::clean_weak_method_links();
 425   for (uint row = 0; row < method_row_limit(); row++) {
 426     Method* p = method(row);
 427     if (p != NULL && p->is_old()) {
 428       clear_method_row(row);
 429     }
 430   }
 431 }
 432 #endif // INCLUDE_JVMCI
 433 
 434 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 435   uint row;
 436   int entries = 0;
 437   for (row = 0; row < row_limit(); row++) {
 438     if (receiver(row) != NULL)  entries++;
 439   }
 440 #if INCLUDE_JVMCI
 441   st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
 442 #else
 443   st->print_cr("count(%u) entries(%u)", count(), entries);
 444 #endif
 445   int total = count();
 446   for (row = 0; row < row_limit(); row++) {
 447     if (receiver(row) != NULL) {
 448       total += receiver_count(row);
 449     }
 450   }
 451   for (row = 0; row < row_limit(); row++) {
 452     if (receiver(row) != NULL) {
 453       tab(st);
 454       receiver(row)->print_value_on(st);
 455       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 456     }
 457   }
 458 }
 459 void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
 460   print_shared(st, "ReceiverTypeData", extra);
 461   print_receiver_data_on(st);
 462 }
 463 
 464 #if INCLUDE_JVMCI
 465 void VirtualCallData::print_method_data_on(outputStream* st) const {
 466   uint row;
 467   int entries = 0;
 468   for (row = 0; row < method_row_limit(); row++) {
 469     if (method(row) != NULL) entries++;
 470   }
 471   tab(st);
 472   st->print_cr("method_entries(%u)", entries);
 473   int total = count();
 474   for (row = 0; row < method_row_limit(); row++) {
 475     if (method(row) != NULL) {
 476       total += method_count(row);
 477     }
 478   }
 479   for (row = 0; row < method_row_limit(); row++) {
 480     if (method(row) != NULL) {
 481       tab(st);
 482       method(row)->print_value_on(st);
 483       st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
 484     }
 485   }
 486 }
 487 #endif // INCLUDE_JVMCI
 488 
 489 void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
 490   print_shared(st, "VirtualCallData", extra);
 491   print_receiver_data_on(st);
 492   print_method_data_on(st);
 493 }
 494 
 495 // ==================================================================
 496 // RetData
 497 //
 498 // A RetData is used to access profiling information for a ret bytecode.
 499 // It is composed of a count of the number of times that the ret has
 500 // been executed, followed by a series of triples of the form
 501 // (bci, count, di) which count the number of times that some bci was the
 502 // target of the ret and cache a corresponding displacement.
 503 
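// Illustrative sketch of one cached row as manipulated by fixup_ret() below:
//
//   bci(row)               return bci seen (no_bci while the row is free)
//   bci_count(row)         number of times that bci was the ret target
//   bci_displacement(row)  cached displacement to that bci's profile data
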
 504 void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 505   for (uint row = 0; row < row_limit(); row++) {
 506     set_bci_displacement(row, -1);
 507     set_bci(row, no_bci);
 508   }
 509   // Release so that other threads see a consistent state.  bci is used
 510   // as a valid flag for bci_displacement.
 511   OrderAccess::release();
 512 }
 513 
 514 // This routine needs to atomically update the RetData structure, so the
 515 // caller needs to hold the RetData_lock before it gets here.  Since taking
 516 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 517 // wrapper around a derived oop, taking the lock in _this_ method will
 518 // basically cause the 'this' pointer's _data field to contain junk after the
 519 // lock.  We require the caller to take the lock before making the ProfileData
 520 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
 521 address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
 522   // First find the mdp which corresponds to the return bci.
 523   address mdp = h_mdo->bci_to_dp(return_bci);
 524 
 525   // Now check to see if any of the cache slots are open.
 526   for (uint row = 0; row < row_limit(); row++) {
 527     if (bci(row) == no_bci) {
 528       set_bci_displacement(row, mdp - dp());
 529       set_bci_count(row, DataLayout::counter_increment);
 530       // Barrier to ensure displacement is written before the bci; allows
 531       // the interpreter to read displacement without fear of race condition.
 532       release_set_bci(row, return_bci);
 533       break;
 534     }
 535   }
 536   return mdp;
 537 }
 538 
 539 void RetData::print_data_on(outputStream* st, const char* extra) const {
 540   print_shared(st, "RetData", extra);
 541   uint row;
 542   int entries = 0;
 543   for (row = 0; row < row_limit(); row++) {
 544     if (bci(row) != no_bci)  entries++;
 545   }
 546   st->print_cr("count(%u) entries(%u)", count(), entries);
 547   for (row = 0; row < row_limit(); row++) {
 548     if (bci(row) != no_bci) {
 549       tab(st);
 550       st->print_cr("bci(%d: count(%u) displacement(%d))",
 551                    bci(row), bci_count(row), bci_displacement(row));
 552     }
 553   }
 554 }
 555 
 556 // ==================================================================
 557 // BranchData
 558 //
 559 // A BranchData is used to access profiling data for a two-way branch.
 560 // It consists of taken and not_taken counts as well as a data displacement
 561 // for the taken case.
 562 
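// Usage sketch (hypothetical consumer, not part of this file): a compiler can
// estimate the taken probability of the branch as
//
//   taken() / (double)(taken() + not_taken())
//
// guarding against a zero denominator before dividing.
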
 563 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 564   assert(stream->bci() == bci(), "wrong pos");
 565   int target = stream->dest();
 566   int my_di = mdo->dp_to_di(dp());
 567   int target_di = mdo->bci_to_di(target);
 568   int offset = target_di - my_di;
 569   set_displacement(offset);
 570 }
 571 
 572 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 573   print_shared(st, "BranchData", extra);
 574   st->print_cr("taken(%u) displacement(%d)",
 575                taken(), displacement());
 576   tab(st);
 577   st->print_cr("not taken(%u)", not_taken());
 578 }
 579 
 580 // ==================================================================
 581 // MultiBranchData
 582 //
 583 // A MultiBranchData is used to access profiling information for
 584 // a multi-way branch (*switch bytecodes).  It consists of a series
 585 // of (count, displacement) pairs, which count the number of times each
 586 // case was taken and specify the data displacement for each branch target.
 587 
 588 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 589   int cell_count = 0;
 590   if (stream->code() == Bytecodes::_tableswitch) {
 591     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 592     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 593   } else {
 594     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 595     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 596   }
 597   return cell_count;
 598 }
 599 
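// Worked example for compute_cell_count() above (assuming per_case_cell_count
// is a (count, displacement) pair): a tableswitch with 3 cases needs
// 1 + 2 * (1 + 3) = 9 cells -- one array length cell plus a pair for the
// default target and one pair per case.
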
 600 void MultiBranchData::post_initialize(BytecodeStream* stream,
 601                                       MethodData* mdo) {
 602   assert(stream->bci() == bci(), "wrong pos");
 603   int target;
 604   int my_di;
 605   int target_di;
 606   int offset;
 607   if (stream->code() == Bytecodes::_tableswitch) {
 608     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 609     int len = sw.length();
 610     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 611     for (int count = 0; count < len; count++) {
 612       target = sw.dest_offset_at(count) + bci();
 613       my_di = mdo->dp_to_di(dp());
 614       target_di = mdo->bci_to_di(target);
 615       offset = target_di - my_di;
 616       set_displacement_at(count, offset);
 617     }
 618     target = sw.default_offset() + bci();
 619     my_di = mdo->dp_to_di(dp());
 620     target_di = mdo->bci_to_di(target);
 621     offset = target_di - my_di;
 622     set_default_displacement(offset);
 623 
 624   } else {
 625     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 626     int npairs = sw.number_of_pairs();
 627     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 628     for (int count = 0; count < npairs; count++) {
 629       LookupswitchPair pair = sw.pair_at(count);
 630       target = pair.offset() + bci();
 631       my_di = mdo->dp_to_di(dp());
 632       target_di = mdo->bci_to_di(target);
 633       offset = target_di - my_di;
 634       set_displacement_at(count, offset);
 635     }
 636     target = sw.default_offset() + bci();
 637     my_di = mdo->dp_to_di(dp());
 638     target_di = mdo->bci_to_di(target);
 639     offset = target_di - my_di;
 640     set_default_displacement(offset);
 641   }
 642 }
 643 
 644 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 645   print_shared(st, "MultiBranchData", extra);
 646   st->print_cr("default_count(%u) displacement(%d)",
 647                default_count(), default_displacement());
 648   int cases = number_of_cases();
 649   for (int i = 0; i < cases; i++) {
 650     tab(st);
 651     st->print_cr("count(%u) displacement(%d)",
 652                  count_at(i), displacement_at(i));
 653   }
 654 }
 655 
 656 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 657   print_shared(st, "ArgInfoData", extra);
 658   int nargs = number_of_args();
 659   for (int i = 0; i < nargs; i++) {
 660     st->print("  0x%x", arg_modified(i));
 661   }
 662   st->cr();
 663 }
 664 
 665 int ParametersTypeData::compute_cell_count(Method* m) {
 666   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 667     return 0;
 668   }
 669   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 670   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 671   if (obj_args > 0) {
 672     return obj_args + 1; // 1 cell for array len
 673   }
 674   return 0;
 675 }
 676 
 677 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 678   _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
 679 }
 680 
 681 bool ParametersTypeData::profiling_enabled() {
 682   return MethodData::profile_parameters();
 683 }
 684 
 685 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 686   st->print("parameter types"); // FIXME extra ignored?
 687   _parameters.print_data_on(st);
 688 }
 689 
 690 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 691   print_shared(st, "SpeculativeTrapData", extra);
 692   tab(st);
 693   method()->print_short_name(st);
 694   st->cr();
 695 }
 696 
 697 // ==================================================================
 698 // MethodData*
 699 //
 700 // A MethodData* holds information which has been collected about
 701 // a method.
 702 
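// Illustrative sketch of the overall MethodData layout (section order follows
// initialize() further below; sizes vary per method):
//
//   [ header | per-bytecode ProfileData | extra data (traps / speculative
//     traps) | ArgInfoData | optional ParametersTypeData ]
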
 703 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
 704   int size = MethodData::compute_allocation_size_in_words(method);
 705 
 706   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
 707     MethodData(method, size, THREAD);
 708 }
 709 
 710 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
 711   if (is_client_compilation_mode_vm()) {
 712     return no_profile_data;
 713   }
 714   switch (code) {
 715   case Bytecodes::_checkcast:
 716   case Bytecodes::_instanceof:
 717   case Bytecodes::_aastore:
 718     if (TypeProfileCasts) {
 719       return ReceiverTypeData::static_cell_count();
 720     } else {
 721       return BitData::static_cell_count();
 722     }
 723   case Bytecodes::_invokespecial:
 724   case Bytecodes::_invokestatic:
 725     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 726       return variable_cell_count;
 727     } else {
 728       return CounterData::static_cell_count();
 729     }
 730   case Bytecodes::_goto:
 731   case Bytecodes::_goto_w:
 732   case Bytecodes::_jsr:
 733   case Bytecodes::_jsr_w:
 734     return JumpData::static_cell_count();
 735   case Bytecodes::_invokevirtual:
 736   case Bytecodes::_invokeinterface:
 737     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 738       return variable_cell_count;
 739     } else {
 740       return VirtualCallData::static_cell_count();
 741     }
 742   case Bytecodes::_invokedynamic:
 743     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 744       return variable_cell_count;
 745     } else {
 746       return CounterData::static_cell_count();
 747     }
 748   case Bytecodes::_ret:
 749     return RetData::static_cell_count();
 750   case Bytecodes::_ifeq:
 751   case Bytecodes::_ifne:
 752   case Bytecodes::_iflt:
 753   case Bytecodes::_ifge:
 754   case Bytecodes::_ifgt:
 755   case Bytecodes::_ifle:
 756   case Bytecodes::_if_icmpeq:
 757   case Bytecodes::_if_icmpne:
 758   case Bytecodes::_if_icmplt:
 759   case Bytecodes::_if_icmpge:
 760   case Bytecodes::_if_icmpgt:
 761   case Bytecodes::_if_icmple:
 762   case Bytecodes::_if_acmpeq:
 763   case Bytecodes::_if_acmpne:
 764   case Bytecodes::_ifnull:
 765   case Bytecodes::_ifnonnull:
 766     return BranchData::static_cell_count();
 767   case Bytecodes::_lookupswitch:
 768   case Bytecodes::_tableswitch:
 769     return variable_cell_count;
 770   default:
 771     return no_profile_data;
 772   }
 773 }
 774 
 775 // Compute the size of the profiling information corresponding to
 776 // the current bytecode.
 777 int MethodData::compute_data_size(BytecodeStream* stream) {
 778   int cell_count = bytecode_cell_count(stream->code());
 779   if (cell_count == no_profile_data) {
 780     return 0;
 781   }
 782   if (cell_count == variable_cell_count) {
 783     switch (stream->code()) {
 784     case Bytecodes::_lookupswitch:
 785     case Bytecodes::_tableswitch:
 786       cell_count = MultiBranchData::compute_cell_count(stream);
 787       break;
 788     case Bytecodes::_invokespecial:
 789     case Bytecodes::_invokestatic:
 790     case Bytecodes::_invokedynamic:
 791       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 792       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 793           profile_return_for_invoke(stream->method(), stream->bci())) {
 794         cell_count = CallTypeData::compute_cell_count(stream);
 795       } else {
 796         cell_count = CounterData::static_cell_count();
 797       }
 798       break;
 799     case Bytecodes::_invokevirtual:
 800     case Bytecodes::_invokeinterface: {
 801       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 802       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 803           profile_return_for_invoke(stream->method(), stream->bci())) {
 804         cell_count = VirtualCallTypeData::compute_cell_count(stream);
 805       } else {
 806         cell_count = VirtualCallData::static_cell_count();
 807       }
 808       break;
 809     }
 810     default:
 811       fatal("unexpected bytecode for var length profile data");
 812     }
 813   }
 814   // Note:  cell_count might be zero, meaning that there is just
 815   //        a DataLayout header, with no extra cells.
 816   assert(cell_count >= 0, "sanity");
 817   return DataLayout::compute_size_in_bytes(cell_count);
 818 }
 819 
 820 bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
 821   // Bytecodes for which we may use speculation
 822   switch (code) {
 823   case Bytecodes::_checkcast:
 824   case Bytecodes::_instanceof:
 825   case Bytecodes::_aastore:
 826   case Bytecodes::_invokevirtual:
 827   case Bytecodes::_invokeinterface:
 828   case Bytecodes::_if_acmpeq:
 829   case Bytecodes::_if_acmpne:
 830   case Bytecodes::_ifnull:
 831   case Bytecodes::_ifnonnull:
 832   case Bytecodes::_invokestatic:
 833 #ifdef COMPILER2
 834     if (is_server_compilation_mode_vm()) {
 835       return UseTypeSpeculation;
 836     }
 837 #endif
 838   default:
 839     return false;
 840   }
 841   return false;
 842 }
 843 
 844 #if INCLUDE_JVMCI
 845 
 846 void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
 847   return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
 848 }
 849 
 850 FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
 851   memcpy(data(), speculation, speculation_len);
 852 }
 853 
 854 // A heuristic check to detect nmethods that outlive a failed speculations list.
 855 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 856   jlong head = (jlong)(address) *failed_speculations_address;
 857   if ((head & 0x1) == 0x1) {
 858     stringStream st;
 859     if (nm != NULL) {
 860       st.print("%d", nm->compile_id());
 861       Method* method = nm->method();
 862       st.print_raw("{");
 863       if (method != NULL) {
 864         method->print_name(&st);
 865       } else {
 866         const char* jvmci_name = nm->jvmci_name();
 867         if (jvmci_name != NULL) {
 868           st.print_raw(jvmci_name);
 869         }
 870       }
 871       st.print_raw("}");
 872     } else {
 873       st.print("<unknown>");
 874     }
 875     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 876   }
 877 }
 878 
 879 bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
 880   assert(failed_speculations_address != NULL, "must be");
 881   size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
 882   FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
 883   if (fs == NULL) {
 884     // no memory -> ignore failed speculation
 885     return false;
 886   }
 887 
 888   guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
 889   guarantee_failed_speculations_alive(nm, failed_speculations_address);
 890 
 891   FailedSpeculation** cursor = failed_speculations_address;
 892   do {
 893     if (*cursor == NULL) {
 894       FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
 895       if (old_fs == NULL) {
 896         // Successfully appended fs to end of the list
 897         return true;
 898       }
 899       cursor = old_fs->next_adr();
 900     } else {
 901       cursor = (*cursor)->next_adr();
 902     }
 903   } while (true);
 904 }
 905 
 906 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 907   assert(failed_speculations_address != NULL, "must be");
 908   FailedSpeculation* fs = *failed_speculations_address;
 909   while (fs != NULL) {
 910     FailedSpeculation* next = fs->next();
 911     delete fs;
 912     fs = next;
 913   }
 914 
 915   // Write an unaligned value to failed_speculations_address to denote
 916 // that it is no longer a valid pointer. This allows for the check
 917   // in add_failed_speculation against adding to a freed failed
 918   // speculations list.
 919   long* head = (long*) failed_speculations_address;
 920   (*head) = (*head) | 0x1;
 921 }
 922 #endif // INCLUDE_JVMCI
 923 
 924 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 925 #if INCLUDE_JVMCI
 926   if (ProfileTraps) {
 927     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 928     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 929 
 930     // Make sure we have a minimum number of extra data slots to
 931     // allocate SpeculativeTrapData entries. We would want to have one
 932     // entry per compilation that inlines this method and for which
 933     // some type speculation assumption fails. So the room we need for
 934     // the SpeculativeTrapData entries doesn't directly depend on the
 935     // size of the method. Because it's hard to estimate, we reserve
 936     // space for an arbitrary number of entries.
 937     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 938       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 939 
 940     return MAX2(extra_data_count, spec_data_count);
 941   } else {
 942     return 0;
 943   }
 944 #else // INCLUDE_JVMCI
 945   if (ProfileTraps) {
 946     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 947     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 948     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 949     int one_percent_of_data
 950       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 951     if (extra_data_count < one_percent_of_data)
 952       extra_data_count = one_percent_of_data;
 953     if (extra_data_count > empty_bc_count)
 954       extra_data_count = empty_bc_count;  // no need for more
 955 
 956     // Make sure we have a minimum number of extra data slots to
 957     // allocate SpeculativeTrapData entries. We would want to have one
 958     // entry per compilation that inlines this method and for which
 959     // some type speculation assumption fails. So the room we need for
 960     // the SpeculativeTrapData entries doesn't directly depend on the
 961     // size of the method. Because it's hard to estimate, we reserve
 962     // space for an arbitrary number of entries.
 963     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 964       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 965 
 966     return MAX2(extra_data_count, spec_data_count);
 967   } else {
 968     return 0;
 969   }
 970 #endif // INCLUDE_JVMCI
 971 }
 972 
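// Worked example for compute_extra_data_count() above (numbers are
// illustrative): with empty_bc_count == 40 and no speculative traps needed,
// the JVMCI branch reserves MIN2(40, MAX2(4, 40 * 30 / 100)) == 12 slots,
// while the non-JVMCI branch reserves (40 * 3) / 128 + 1 == 1 slot (possibly
// raised for large methods and capped at empty_bc_count).
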
 973 // Compute the size of the MethodData* necessary to store
 974 // profiling information about a given method.  Size is in bytes.
 975 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
 976   int data_size = 0;
 977   BytecodeStream stream(method);
 978   Bytecodes::Code c;
 979   int empty_bc_count = 0;  // number of bytecodes lacking data
 980   bool needs_speculative_traps = false;
 981   while ((c = stream.next()) >= 0) {
 982     int size_in_bytes = compute_data_size(&stream);
 983     data_size += size_in_bytes;
 984     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
 985     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
 986   }
 987   int object_size = in_bytes(data_offset()) + data_size;
 988 
 989   // Add some extra DataLayout cells (at least one) to track stray traps.
 990   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
 991   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
 992 
 993   // Add a cell to record information about modified arguments.
 994   int arg_size = method->size_of_parameters();
 995   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
 996 
 997   // Reserve room for an area of the MDO dedicated to profiling of
 998   // parameters
 999   int args_cell = ParametersTypeData::compute_cell_count(method());
1000   if (args_cell > 0) {
1001     object_size += DataLayout::compute_size_in_bytes(args_cell);
1002   }
1003   return object_size;
1004 }
1005 
1006 // Compute the size of the MethodData* necessary to store
1007 // profiling information about a given method.  Size is in words
1008 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1009   int byte_size = compute_allocation_size_in_bytes(method);
1010   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1011   return align_metadata_size(word_size);
1012 }
1013 
1014 // Initialize an individual data segment.  Returns the size of
1015 // the segment in bytes.
1016 int MethodData::initialize_data(BytecodeStream* stream,
1017                                        int data_index) {
1018   if (is_client_compilation_mode_vm()) {
1019     return 0;
1020   }
1021   int cell_count = -1;
1022   int tag = DataLayout::no_tag;
1023   DataLayout* data_layout = data_layout_at(data_index);
1024   Bytecodes::Code c = stream->code();
1025   switch (c) {
1026   case Bytecodes::_checkcast:
1027   case Bytecodes::_instanceof:
1028   case Bytecodes::_aastore:
1029     if (TypeProfileCasts) {
1030       cell_count = ReceiverTypeData::static_cell_count();
1031       tag = DataLayout::receiver_type_data_tag;
1032     } else {
1033       cell_count = BitData::static_cell_count();
1034       tag = DataLayout::bit_data_tag;
1035     }
1036     break;
1037   case Bytecodes::_invokespecial:
1038   case Bytecodes::_invokestatic: {
1039     int counter_data_cell_count = CounterData::static_cell_count();
1040     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1041         profile_return_for_invoke(stream->method(), stream->bci())) {
1042       cell_count = CallTypeData::compute_cell_count(stream);
1043     } else {
1044       cell_count = counter_data_cell_count;
1045     }
1046     if (cell_count > counter_data_cell_count) {
1047       tag = DataLayout::call_type_data_tag;
1048     } else {
1049       tag = DataLayout::counter_data_tag;
1050     }
1051     break;
1052   }
1053   case Bytecodes::_goto:
1054   case Bytecodes::_goto_w:
1055   case Bytecodes::_jsr:
1056   case Bytecodes::_jsr_w:
1057     cell_count = JumpData::static_cell_count();
1058     tag = DataLayout::jump_data_tag;
1059     break;
1060   case Bytecodes::_invokevirtual:
1061   case Bytecodes::_invokeinterface: {
1062     int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
1063     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1064         profile_return_for_invoke(stream->method(), stream->bci())) {
1065       cell_count = VirtualCallTypeData::compute_cell_count(stream);
1066     } else {
1067       cell_count = virtual_call_data_cell_count;
1068     }
1069     if (cell_count > virtual_call_data_cell_count) {
1070       tag = DataLayout::virtual_call_type_data_tag;
1071     } else {
1072       tag = DataLayout::virtual_call_data_tag;
1073     }
1074     break;
1075   }
1076   case Bytecodes::_invokedynamic: {
1077     // %%% should make a type profile for any invokedynamic that takes a ref argument
1078     int counter_data_cell_count = CounterData::static_cell_count();
1079     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1080         profile_return_for_invoke(stream->method(), stream->bci())) {
1081       cell_count = CallTypeData::compute_cell_count(stream);
1082     } else {
1083       cell_count = counter_data_cell_count;
1084     }
1085     if (cell_count > counter_data_cell_count) {
1086       tag = DataLayout::call_type_data_tag;
1087     } else {
1088       tag = DataLayout::counter_data_tag;
1089     }
1090     break;
1091   }
1092   case Bytecodes::_ret:
1093     cell_count = RetData::static_cell_count();
1094     tag = DataLayout::ret_data_tag;
1095     break;
1096   case Bytecodes::_ifeq:
1097   case Bytecodes::_ifne:
1098   case Bytecodes::_iflt:
1099   case Bytecodes::_ifge:
1100   case Bytecodes::_ifgt:
1101   case Bytecodes::_ifle:
1102   case Bytecodes::_if_icmpeq:
1103   case Bytecodes::_if_icmpne:
1104   case Bytecodes::_if_icmplt:
1105   case Bytecodes::_if_icmpge:
1106   case Bytecodes::_if_icmpgt:
1107   case Bytecodes::_if_icmple:
1108   case Bytecodes::_if_acmpeq:
1109   case Bytecodes::_if_acmpne:
1110   case Bytecodes::_ifnull:
1111   case Bytecodes::_ifnonnull:
1112     cell_count = BranchData::static_cell_count();
1113     tag = DataLayout::branch_data_tag;
1114     break;
1115   case Bytecodes::_lookupswitch:
1116   case Bytecodes::_tableswitch:
1117     cell_count = MultiBranchData::compute_cell_count(stream);
1118     tag = DataLayout::multi_branch_data_tag;
1119     break;
1120   default:
1121     break;
1122   }
1123   assert(tag == DataLayout::multi_branch_data_tag ||
1124          ((MethodData::profile_arguments() || MethodData::profile_return()) &&
1125           (tag == DataLayout::call_type_data_tag ||
1126            tag == DataLayout::counter_data_tag ||
1127            tag == DataLayout::virtual_call_type_data_tag ||
1128            tag == DataLayout::virtual_call_data_tag)) ||
1129          cell_count == bytecode_cell_count(c), "cell counts must agree");
1130   if (cell_count >= 0) {
1131     assert(tag != DataLayout::no_tag, "bad tag");
1132     assert(bytecode_has_profile(c), "agree w/ BHP");
1133     data_layout->initialize(tag, stream->bci(), cell_count);
1134     return DataLayout::compute_size_in_bytes(cell_count);
1135   } else {
1136     assert(!bytecode_has_profile(c), "agree w/ !BHP");
1137     return 0;
1138   }
1139 }
1140 
1141 // Get the data at an arbitrary (sort of) data index.
1142 ProfileData* MethodData::data_at(int data_index) const {
1143   if (out_of_bounds(data_index)) {
1144     return NULL;
1145   }
1146   DataLayout* data_layout = data_layout_at(data_index);
1147   return data_layout->data_in();
1148 }
1149 
1150 ProfileData* DataLayout::data_in() {
1151   switch (tag()) {
1152   case DataLayout::no_tag:
1153   default:
1154     ShouldNotReachHere();
1155     return NULL;
1156   case DataLayout::bit_data_tag:
1157     return new BitData(this);
1158   case DataLayout::counter_data_tag:
1159     return new CounterData(this);
1160   case DataLayout::jump_data_tag:
1161     return new JumpData(this);
1162   case DataLayout::receiver_type_data_tag:
1163     return new ReceiverTypeData(this);
1164   case DataLayout::virtual_call_data_tag:
1165     return new VirtualCallData(this);
1166   case DataLayout::ret_data_tag:
1167     return new RetData(this);
1168   case DataLayout::branch_data_tag:
1169     return new BranchData(this);
1170   case DataLayout::multi_branch_data_tag:
1171     return new MultiBranchData(this);
1172   case DataLayout::arg_info_data_tag:
1173     return new ArgInfoData(this);
1174   case DataLayout::call_type_data_tag:
1175     return new CallTypeData(this);
1176   case DataLayout::virtual_call_type_data_tag:
1177     return new VirtualCallTypeData(this);
1178   case DataLayout::parameters_type_data_tag:
1179     return new ParametersTypeData(this);
1180   case DataLayout::speculative_trap_data_tag:
1181     return new SpeculativeTrapData(this);
1182   }
1183 }
1184 
1185 // Iteration over data.
1186 ProfileData* MethodData::next_data(ProfileData* current) const {
1187   int current_index = dp_to_di(current->dp());
1188   int next_index = current_index + current->size_in_bytes();
1189   ProfileData* next = data_at(next_index);
1190   return next;
1191 }
1192 
1193 // Give each of the data entries a chance to perform specific
1194 // data initialization.
1195 void MethodData::post_initialize(BytecodeStream* stream) {
1196   ResourceMark rm;
1197   ProfileData* data;
1198   for (data = first_data(); is_valid(data); data = next_data(data)) {
1199     stream->set_start(data->bci());
1200     stream->next();
1201     data->post_initialize(stream, this);
1202   }
1203   if (_parameters_type_data_di != no_parameters) {
1204     parameters_type_data()->post_initialize(NULL, this);
1205   }
1206 }
1207 
1208 // Initialize the MethodData* corresponding to a given method.
1209 MethodData::MethodData(const methodHandle& method, int size, TRAPS)
1210   : _extra_data_lock(Mutex::leaf, "MDO extra data lock"),
1211     _parameters_type_data_di(parameters_uninitialized) {
1212   // Set the method back-pointer.
1213   _method = method();
1214   initialize();
1215 }
1216 
1217 void MethodData::initialize() {
1218   Thread* thread = Thread::current();
1219   NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
1220   ResourceMark rm(thread);
1221 
1222   init();
1223   set_creation_mileage(mileage_of(method()));
1224 
1225   // Go through the bytecodes and allocate and initialize the
1226   // corresponding data cells.
1227   int data_size = 0;
1228   int empty_bc_count = 0;  // number of bytecodes lacking data
1229   _data[0] = 0;  // apparently not set below.
1230   BytecodeStream stream(methodHandle(thread, method()));
1231   Bytecodes::Code c;
1232   bool needs_speculative_traps = false;
1233   while ((c = stream.next()) >= 0) {
1234     int size_in_bytes = initialize_data(&stream, data_size);
1235     data_size += size_in_bytes;
1236     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
1237     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1238   }
1239   _data_size = data_size;
1240   int object_size = in_bytes(data_offset()) + data_size;
1241 
1242   // Add some extra DataLayout cells (at least one) to track stray traps.
1243   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1244   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
1245 
1246   // Let's zero the space for the extra data
1247   Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
1248 
1249   // Add a cell to record information about modified arguments.
1250   // Set up _args_modified array after traps cells so that
1251   // the code for traps cells works.
1252   DataLayout *dp = data_layout_at(data_size + extra_size);
1253 
1254   int arg_size = method()->size_of_parameters();
1255   dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
1256 
1257   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
1258   object_size += extra_size + arg_data_size;
1259 
1260   int parms_cell = ParametersTypeData::compute_cell_count(method());
1261   // If we are profiling parameters, we reserve an area near the end
1262   // of the MDO after the slots for bytecodes (because there's no bci
1263   // for method entry so they don't fit with the framework for the
1264   // profiling of bytecodes). We store the offset within the MDO of
1265   // this area (or -1 if no parameter is profiled).
1266   if (parms_cell > 0) {
1267     object_size += DataLayout::compute_size_in_bytes(parms_cell);
1268     _parameters_type_data_di = data_size + extra_size + arg_data_size;
1269     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
1270     dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
1271   } else {
1272     _parameters_type_data_di = no_parameters;
1273   }
1274 
1275   // Set an initial hint. Don't use set_hint_di() because
1276   // first_di() may be out of bounds if data_size is 0.
1277   // In that situation, _hint_di is never used, but at
1278   // least well-defined.
1279   _hint_di = first_di();
1280 
1281   post_initialize(&stream);
1282 
1283   assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
1284   set_size(object_size);
1285 }
1286 
1287 void MethodData::init() {
1288   _invocation_counter.init();
1289   _backedge_counter.init();
1290   _invocation_counter_start = 0;
1291   _backedge_counter_start = 0;
1292 
1293   // Set per-method invoke- and backedge mask.
1294   double scale = 1.0;
1295   methodHandle mh(Thread::current(), _method);
1296   CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
1297   _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1298   _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
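  // For example (illustrative values), with scale == 1.0 and
  // Tier0InvokeNotifyFreqLog == 7, _invoke_mask covers the low 7 count bits
  // (shifted by InvocationCounter::count_shift), so the interpreter notifies
  // the compilation policy roughly every 128 invocations.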
1299 
1300   _tenure_traps = 0;
1301   _num_loops = 0;
1302   _num_blocks = 0;
1303   _would_profile = unknown;
1304 
1305 #if INCLUDE_JVMCI
1306   _jvmci_ir_size = 0;
1307   _failed_speculations = NULL;
1308 #endif
1309 
1310 #if INCLUDE_RTM_OPT
1311   _rtm_state = NoRTM; // No RTM lock eliding by default
1312   if (UseRTMLocking &&
1313       !CompilerOracle::has_option_string(mh, "NoRTMLockEliding")) {
1314     if (CompilerOracle::has_option_string(mh, "UseRTMLockEliding") || !UseRTMDeopt) {
1315       // Generate RTM lock eliding code without abort ratio calculation code.
1316       _rtm_state = UseRTM;
1317     } else if (UseRTMDeopt) {
1318       // Generate RTM lock eliding code and include abort ratio calculation
1319       // code if UseRTMDeopt is on.
1320       _rtm_state = ProfileRTM;
1321     }
1322   }
1323 #endif
1324 
1325   // Initialize flags and trap history.
1326   _nof_decompiles = 0;
1327   _nof_overflow_recompiles = 0;
1328   _nof_overflow_traps = 0;
1329   clear_escape_info();
1330   assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
1331   Copy::zero_to_words((HeapWord*) &_trap_hist,
1332                       sizeof(_trap_hist) / sizeof(HeapWord));
1333 }
1334 
1335 // Get a measure of how much mileage the method has on it.
1336 int MethodData::mileage_of(Method* method) {
1337   int mileage = 0;
1338   if (TieredCompilation) {
1339     mileage = MAX2(method->invocation_count(), method->backedge_count());
1340   } else {
1341     int iic = method->interpreter_invocation_count();
1342     if (mileage < iic)  mileage = iic;
1343     MethodCounters* mcs = method->method_counters();
1344     if (mcs != NULL) {
1345       InvocationCounter* ic = mcs->invocation_counter();
1346       InvocationCounter* bc = mcs->backedge_counter();
1347       int icval = ic->count();
1348       if (ic->carry()) icval += CompileThreshold;
1349       if (mileage < icval)  mileage = icval;
1350       int bcval = bc->count();
1351       if (bc->carry()) bcval += CompileThreshold;
1352       if (mileage < bcval)  mileage = bcval;
1353     }
1354   }
1355   return mileage;
1356 }
1357 
1358 bool MethodData::is_mature() const {
1359   return CompilationPolicy::policy()->is_mature(_method);
1360 }
1361 
1362 // Translate a bci to its corresponding data index (di).
1363 address MethodData::bci_to_dp(int bci) {
1364   ResourceMark rm;
1365   ProfileData* data = data_before(bci);
1366   ProfileData* prev = NULL;
1367   for ( ; is_valid(data); data = next_data(data)) {
1368     if (data->bci() >= bci) {
1369       if (data->bci() == bci)  set_hint_di(dp_to_di(data->dp()));
1370       else if (prev != NULL)   set_hint_di(dp_to_di(prev->dp()));
1371       return data->dp();
1372     }
1373     prev = data;
1374   }
1375   return (address)limit_data_position();
1376 }
1377 
1378 // Translate a bci to its corresponding data, or NULL.
1379 ProfileData* MethodData::bci_to_data(int bci) {
1380   ProfileData* data = data_before(bci);
1381   for ( ; is_valid(data); data = next_data(data)) {
1382     if (data->bci() == bci) {
1383       set_hint_di(dp_to_di(data->dp()));
1384       return data;
1385     } else if (data->bci() > bci) {
1386       break;
1387     }
1388   }
1389   return bci_to_extra_data(bci, NULL, false);
1390 }
1391 
1392 DataLayout* MethodData::next_extra(DataLayout* dp) {
1393   int nb_cells = 0;
1394   switch(dp->tag()) {
1395   case DataLayout::bit_data_tag:
1396   case DataLayout::no_tag:
1397     nb_cells = BitData::static_cell_count();
1398     break;
1399   case DataLayout::speculative_trap_data_tag:
1400     nb_cells = SpeculativeTrapData::static_cell_count();
1401     break;
1402   default:
1403     fatal("unexpected tag %d", dp->tag());
1404   }
1405   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1406 }
1407 
1408 ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
1409   DataLayout* end = args_data_limit();
1410 
1411   for (;; dp = next_extra(dp)) {
1412     assert(dp < end, "moved past end of extra data");
1413     // No need for "Atomic::load_acquire" ops,
1414     // since the data structure is monotonic.
1415     switch(dp->tag()) {
1416     case DataLayout::no_tag:
1417       return NULL;
1418     case DataLayout::arg_info_data_tag:
1419       dp = end;
1420       return NULL; // ArgInfoData is at the end of extra data section.
1421     case DataLayout::bit_data_tag:
1422       if (m == NULL && dp->bci() == bci) {
1423         return new BitData(dp);
1424       }
1425       break;
1426     case DataLayout::speculative_trap_data_tag:
1427       if (m != NULL) {
1428         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1429         // data->method() may be null in case of a concurrent
1430         // allocation. Maybe it's for the same method. Try to use that
1431         // entry in that case.
1432         if (dp->bci() == bci) {
1433           if (data->method() == NULL) {
1434             assert(concurrent, "impossible because no concurrent allocation");
1435             return NULL;
1436           } else if (data->method() == m) {
1437             return data;
1438           }
1439         }
1440       }
1441       break;
1442     default:
1443       fatal("unexpected tag %d", dp->tag());
1444     }
1445   }
1446   return NULL;
1447 }
1448 
1449 
1450 // Translate a bci to its corresponding extra data, or NULL.
1451 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1452   // This code assumes an entry for a SpeculativeTrapData is 2 cells
1453   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1454          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1455          "code needs to be adjusted");
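  // Extra data layout (sketch): a prefix of allocated one- and two-cell
  // entries, then free no_tag slots, with the ArgInfoData entry (if any)
  // always last:
  //   [BitData][SpeculativeTrapData]...[no_tag]...[ArgInfoData]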
1456 
1457   // Do not create one of these if the method has been redefined.
1458   if (m != NULL && m->is_old()) {
1459     return NULL;
1460   }
1461 
1462   DataLayout* dp  = extra_data_base();
1463   DataLayout* end = args_data_limit();
1464 
1465   // Allocation in the extra data space has to be atomic because not
1466   // all entries have the same size and non-atomic concurrent
1467   // allocation would result in a corrupted extra data space.
1468   ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
1469   if (result != NULL) {
1470     return result;
1471   }
1472 
1473   if (create_if_missing && dp < end) {
1474     MutexLocker ml(&_extra_data_lock);
1475     // Check again now that we have the lock. Another thread may
1476     // have added extra data entries.
1477     ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
1478     if (result != NULL || dp >= end) {
1479       return result;
1480     }
1481 
1482     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
1483     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1484     u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1485     // A SpeculativeTrapData entry is 2 cells. Make sure we have room.
1486     if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
1487       return NULL;
1488     }
1489     DataLayout temp;
1490     temp.initialize(tag, bci, 0);
1491 
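    // Publishing the header (tag and bci) is a single store; for a
    // SpeculativeTrapData the method cell is filled in afterwards
    // (set_method below), which is why the lock-free scan in
    // bci_to_extra_data_helper must tolerate a NULL method.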
1492     dp->set_header(temp.header());
1493     assert(dp->tag() == tag, "sane");
1494     assert(dp->bci() == bci, "no concurrent allocation");
1495     if (tag == DataLayout::bit_data_tag) {
1496       return new BitData(dp);
1497     } else {
1498       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1499       data->set_method(m);
1500       return data;
1501     }
1502   }
1503   return NULL;
1504 }
1505 
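// Return the ArgInfoData entry, which sits at the end of the extra data
// section, or NULL if none was allocated.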
1506 ArgInfoData *MethodData::arg_info() {
1507   DataLayout* dp    = extra_data_base();
1508   DataLayout* end   = args_data_limit();
1509   for (; dp < end; dp = next_extra(dp)) {
1510     if (dp->tag() == DataLayout::arg_info_data_tag)
1511       return new ArgInfoData(dp);
1512   }
1513   return NULL;
1514 }
1515 
1516 // Printing
1517 
1518 void MethodData::print_on(outputStream* st) const {
1519   assert(is_methodData(), "should be method data");
1520   st->print("method data for ");
1521   method()->print_value_on(st);
1522   st->cr();
1523   print_data_on(st);
1524 }
1525 
1526 void MethodData::print_value_on(outputStream* st) const {
1527   assert(is_methodData(), "should be method data");
1528   st->print("method data for ");
1529   method()->print_value_on(st);
1530 }
1531 
1532 void MethodData::print_data_on(outputStream* st) const {
1533   ResourceMark rm;
1534   ProfileData* data = first_data();
1535   if (_parameters_type_data_di != no_parameters) {
1536     parameters_type_data()->print_data_on(st);
1537   }
1538   for ( ; is_valid(data); data = next_data(data)) {
1539     st->print("%d", dp_to_di(data->dp()));
1540     st->fill_to(6);
1541     data->print_data_on(st, this);
1542   }
1543   st->print_cr("--- Extra data:");
1544   DataLayout* dp    = extra_data_base();
1545   DataLayout* end   = args_data_limit();
1546   for (;; dp = next_extra(dp)) {
1547     assert(dp < end, "moved past end of extra data");
1548     // No need for "Atomic::load_acquire" ops,
1549     // since the data structure is monotonic.
1550     switch(dp->tag()) {
1551     case DataLayout::no_tag:
1552       continue;
1553     case DataLayout::bit_data_tag:
1554       data = new BitData(dp);
1555       break;
1556     case DataLayout::speculative_trap_data_tag:
1557       data = new SpeculativeTrapData(dp);
1558       break;
1559     case DataLayout::arg_info_data_tag:
1560       data = new ArgInfoData(dp);
1561       dp = end; // ArgInfoData is at the end of extra data section.
1562       break;
1563     default:
1564       fatal("unexpected tag %d", dp->tag());
1565     }
1566     st->print("%d", dp_to_di(data->dp()));
1567     st->fill_to(6);
1568     data->print_data_on(st);
1569     if (dp >= end) return;
1570   }
1571 }
1572 
1573 // Verification
1574 
1575 void MethodData::verify_on(outputStream* st) {
1576   guarantee(is_methodData(), "object must be method data");
1578   this->verify_data_on(st);
1579 }
1580 
1581 void MethodData::verify_data_on(outputStream* st) {
1582   NEEDS_CLEANUP;
1583   // not yet implemented.
1584 }
1585 
1586 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1587   if (m->is_compiled_lambda_form()) {
1588     return true;
1589   }
1590 
1591   Bytecode_invoke inv(m, bci);
1592   return inv.is_invokedynamic() || inv.is_invokehandle();
1593 }
1594 
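// Argument type profiling is also forced for virtual calls into
// jdk.internal.misc.Unsafe / sun.misc.Unsafe whose method names start
// with "get" or "put" (the memory accessors), independently of the
// JSR 292 restriction (see profile_arguments_for_invoke below).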
1595 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1596   Bytecode_invoke inv(m, bci);
1597   if (inv.is_invokevirtual()) {
1598     if (inv.klass() == vmSymbols::jdk_internal_misc_Unsafe() ||
1599         inv.klass() == vmSymbols::sun_misc_Unsafe()) {
1600       ResourceMark rm;
1601       char* name = inv.name()->as_C_string();
1602       if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
1603         return true;
1604       }
1605     }
1606   }
1607   return false;
1608 }
1609 
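// TypeProfileLevel is decoded as three decimal digits, one per kind of
// profiling: ones = argument types at call sites, tens = return types,
// hundreds = parameter types on method entry. For each digit, 0 turns
// the profiling off, 1 (type_profile_jsr292) restricts it to JSR 292
// call sites / lambda forms, and 2 (type_profile_all) enables it for all
// invocations. For example, with TypeProfileLevel == 122:
//   profile_arguments_flag()  == 122 % 10         == 2   (all)
//   profile_return_flag()     == (122 % 100) / 10 == 2   (all)
//   profile_parameters_flag() == 122 / 100        == 1   (jsr292 only)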
1610 int MethodData::profile_arguments_flag() {
1611   return TypeProfileLevel % 10;
1612 }
1613 
1614 bool MethodData::profile_arguments() {
1615   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
1616 }
1617 
1618 bool MethodData::profile_arguments_jsr292_only() {
1619   return profile_arguments_flag() == type_profile_jsr292;
1620 }
1621 
1622 bool MethodData::profile_all_arguments() {
1623   return profile_arguments_flag() == type_profile_all;
1624 }
1625 
1626 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1627   if (!profile_arguments()) {
1628     return false;
1629   }
1630 
1631   if (profile_all_arguments()) {
1632     return true;
1633   }
1634 
1635   if (profile_unsafe(m, bci)) {
1636     return true;
1637   }
1638 
1639   assert(profile_arguments_jsr292_only(), "inconsistent");
1640   return profile_jsr292(m, bci);
1641 }
1642 
1643 int MethodData::profile_return_flag() {
1644   return (TypeProfileLevel % 100) / 10;
1645 }
1646 
1647 bool MethodData::profile_return() {
1648   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1649 }
1650 
1651 bool MethodData::profile_return_jsr292_only() {
1652   return profile_return_flag() == type_profile_jsr292;
1653 }
1654 
1655 bool MethodData::profile_all_return() {
1656   return profile_return_flag() == type_profile_all;
1657 }
1658 
1659 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1660   if (!profile_return()) {
1661     return false;
1662   }
1663 
1664   if (profile_all_return()) {
1665     return true;
1666   }
1667 
1668   assert(profile_return_jsr292_only(), "inconsistent");
1669   return profile_jsr292(m, bci);
1670 }
1671 
1672 int MethodData::profile_parameters_flag() {
1673   return TypeProfileLevel / 100;
1674 }
1675 
1676 bool MethodData::profile_parameters() {
1677   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1678 }
1679 
1680 bool MethodData::profile_parameters_jsr292_only() {
1681   return profile_parameters_flag() == type_profile_jsr292;
1682 }
1683 
1684 bool MethodData::profile_all_parameters() {
1685   return profile_parameters_flag() == type_profile_all;
1686 }
1687 
1688 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1689   if (!profile_parameters()) {
1690     return false;
1691   }
1692 
1693   if (profile_all_parameters()) {
1694     return true;
1695   }
1696 
1697   assert(profile_parameters_jsr292_only(), "inconsistent");
1698   return m->is_compiled_lambda_form();
1699 }
1700 
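// Iterate the metaspace pointers embedded in this MethodData (currently
// only the back pointer to the owning Method), e.g. for CDS archiving
// and relocation.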
1701 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1702   log_trace(cds)("Iter(MethodData): %p", this);
1703   it->push(&_method);
1704 }
1705 
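// Helper for compacting the extra data section once dead
// SpeculativeTrapData entries have been found: with reset == false the
// entry at dp is copied "shift" cells to the left, with reset == true
// the "shift" cells immediately before dp (now stale copies) are zeroed
// back to no_tag.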
1706 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1707   if (shift == 0) {
1708     return;
1709   }
1710   if (!reset) {
1711     // Move all cells of the trap entry at dp left by "shift" cells.
1712     intptr_t* start = (intptr_t*)dp;
1713     intptr_t* end = (intptr_t*)next_extra(dp);
1714     for (intptr_t* ptr = start; ptr < end; ptr++) {
1715       *(ptr-shift) = *ptr;
1716     }
1717   } else {
1718     // Reset "shift" cells stopping at dp
1719     intptr_t* start = ((intptr_t*)dp) - shift;
1720     intptr_t* end = (intptr_t*)dp;
1721     for (intptr_t* ptr = start; ptr < end; ptr++) {
1722       *ptr = 0;
1723     }
1724   }
1725 }
1726 
1727 // Check for entries that reference an unloaded method
1728 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1729   bool _always_clean;
1730 public:
1731   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1732   bool is_live(Method* m) {
1733     return !(_always_clean) && m->method_holder()->is_loader_alive();
1734   }
1735 };
1736 
1737 // Check for entries that reference a redefined method
1738 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1739 public:
1740   CleanExtraDataMethodClosure() {}
1741   bool is_live(Method* m) { return !m->is_old(); }
1742 };
1743 
1744 
1745 // Remove SpeculativeTrapData entries that reference an unloaded or
1746 // redefined method
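// Compaction sketch: if the section holds [T1][T2][T3][B] with T2 dead
// (each Tn a two-cell SpeculativeTrapData, B a one-cell BitData), the
// scan copies T3 and B two cells to the left and then resets the two
// stale cells in front of the free area, leaving
// [T1][T3][B][no_tag][no_tag].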
1747 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1748   DataLayout* dp  = extra_data_base();
1749   DataLayout* end = args_data_limit();
1750 
1751   int shift = 0;
1752   for (; dp < end; dp = next_extra(dp)) {
1753     switch(dp->tag()) {
1754     case DataLayout::speculative_trap_data_tag: {
1755       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1756       Method* m = data->method();
1757       assert(m != NULL, "should have a method");
1758       if (!cl->is_live(m)) {
1759         // "shift" accumulates the number of cells for dead
1760         // SpeculativeTrapData entries that have been seen so
1761         // far. Following entries must be shifted left by that many
1762         // cells to remove the dead SpeculativeTrapData entries.
1763         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1764       } else {
1765         // Shift this entry left if it follows dead
1766         // SpeculativeTrapData entries
1767         clean_extra_data_helper(dp, shift);
1768       }
1769       break;
1770     }
1771     case DataLayout::bit_data_tag:
1772       // Shift this entry left if it follows dead SpeculativeTrapData
1773       // entries
1774       clean_extra_data_helper(dp, shift);
1775       continue;
1776     case DataLayout::no_tag:
1777     case DataLayout::arg_info_data_tag:
1778       // We are at the end of the live trap entries. The previous
1779       // "shift" cells contain entries that are either dead or were
1780       // shifted left. They need to be reset to no_tag.
1781       clean_extra_data_helper(dp, shift, true);
1782       return;
1783     default:
1784       fatal("unexpected tag %d", dp->tag());
1785     }
1786   }
1787 }
1788 
1789 // Verify there's no unloaded or redefined method referenced by a
1790 // SpeculativeTrapData entry
1791 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1792 #ifdef ASSERT
1793   DataLayout* dp  = extra_data_base();
1794   DataLayout* end = args_data_limit();
1795 
1796   for (; dp < end; dp = next_extra(dp)) {
1797     switch(dp->tag()) {
1798     case DataLayout::speculative_trap_data_tag: {
1799       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1800       Method* m = data->method();
1801       assert(m != NULL && cl->is_live(m), "Method should exist");
1802       break;
1803     }
1804     case DataLayout::bit_data_tag:
1805       continue;
1806     case DataLayout::no_tag:
1807     case DataLayout::arg_info_data_tag:
1808       return;
1809     default:
1810       fatal("unexpected tag %d", dp->tag());
1811     }
1812   }
1813 #endif
1814 }
1815 
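// Clear weak klass references from the normal profile data and remove
// extra-data entries whose klasses are no longer reachable; with
// always_clean set, every recorded klass is treated as dead.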
1816 void MethodData::clean_method_data(bool always_clean) {
1817   ResourceMark rm;
1818   for (ProfileData* data = first_data();
1819        is_valid(data);
1820        data = next_data(data)) {
1821     data->clean_weak_klass_links(always_clean);
1822   }
1823   ParametersTypeData* parameters = parameters_type_data();
1824   if (parameters != NULL) {
1825     parameters->clean_weak_klass_links(always_clean);
1826   }
1827 
1828   CleanExtraDataKlassClosure cl(always_clean);
1829   clean_extra_data(&cl);
1830   verify_extra_data_clean(&cl);
1831 }
1832 
1833 // Called during class redefinition to clean references to "old"
1834 // (redefined) methods out of this MethodData.
1835 void MethodData::clean_weak_method_links() {
1836   ResourceMark rm;
1837   for (ProfileData* data = first_data();
1838        is_valid(data);
1839        data = next_data(data)) {
1840     data->clean_weak_method_links();
1841   }
1842 
1843   CleanExtraDataMethodClosure cl;
1844   clean_extra_data(&cl);
1845   verify_extra_data_clean(&cl);
1846 }
1847 
1848 #ifdef ASSERT
1849 void MethodData::verify_clean_weak_method_links() {
1850   ResourceMark rm;
1851   for (ProfileData* data = first_data();
1852        is_valid(data);
1853        data = next_data(data)) {
1854     data->verify_clean_weak_method_links();
1855   }
1856 
1857   CleanExtraDataMethodClosure cl;
1858   verify_extra_data_clean(&cl);
1859 }
1860 #endif // ASSERT