1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compilationPolicy.hpp"
  28 #include "compiler/compilerOracle.hpp"
  29 #include "interpreter/bytecode.hpp"
  30 #include "interpreter/bytecodeStream.hpp"
  31 #include "interpreter/linkResolver.hpp"
  32 #include "memory/heapInspection.hpp"
  33 #include "memory/metaspaceClosure.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/methodData.inline.hpp"
  36 #include "prims/jvmtiRedefineClasses.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/atomic.hpp"
  39 #include "runtime/deoptimization.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 #include "runtime/safepointVerifiers.hpp"
  43 #include "utilities/align.hpp"
  44 #include "utilities/copy.hpp"
  45 
  46 // ==================================================================
  47 // DataLayout
  48 //
  49 // Overlay for generic profiling data.
  50 
  51 // Some types of data layouts need a length field.
  52 bool DataLayout::needs_array_len(u1 tag) {
  53   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  54 }
  55 
  56 // Perform generic initialization of the data.  More specific
  57 // initialization occurs in overrides of ProfileData::post_initialize.
  58 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  59   _header._bits = (intptr_t)0;
  60   _header._struct._tag = tag;
  61   _header._struct._bci = bci;
  62   for (int i = 0; i < cell_count; i++) {
  63     set_cell_at(i, (intptr_t)0);
  64   }
  65   if (needs_array_len(tag)) {
  66     set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  67   }
  68   if (tag == call_type_data_tag) {
  69     CallTypeData::initialize(this, cell_count);
  70   } else if (tag == virtual_call_type_data_tag) {
  71     VirtualCallTypeData::initialize(this, cell_count);
  72   }
  73 }
  74 
  75 void DataLayout::clean_weak_klass_links(bool always_clean) {
  76   ResourceMark m;
  77   data_in()->clean_weak_klass_links(always_clean);
  78 }
  79 
  80 
  81 // ==================================================================
  82 // ProfileData
  83 //
  84 // A ProfileData object is created to refer to a section of profiling
  85 // data in a structured way.
  86 
  87 // Constructor for invalid ProfileData.
  88 ProfileData::ProfileData() {
  89   _data = NULL;
  90 }
  91 
  92 char* ProfileData::print_data_on_helper(const MethodData* md) const {
  93   DataLayout* dp  = md->extra_data_base();
  94   DataLayout* end = md->args_data_limit();
  95   stringStream ss;
  96   for (;; dp = MethodData::next_extra(dp)) {
  97     assert(dp < end, "moved past end of extra data");
  98     switch(dp->tag()) {
  99     case DataLayout::speculative_trap_data_tag:
 100       if (dp->bci() == bci()) {
 101         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
 102         int trap = data->trap_state();
 103         char buf[100];
 104         ss.print("trap/");
 105         data->method()->print_short_name(&ss);
 106         ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 107       }
 108       break;
 109     case DataLayout::bit_data_tag:
 110       break;
 111     case DataLayout::no_tag:
 112     case DataLayout::arg_info_data_tag:
      return ss.as_string();
 115     default:
 116       fatal("unexpected tag %d", dp->tag());
 117     }
 118   }
 119   return NULL;
 120 }
 121 
 122 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
 123   print_data_on(st, print_data_on_helper(md));
 124 }
 125 
 126 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
 127   st->print("bci: %d", bci());
 128   st->fill_to(tab_width_one);
 129   st->print("%s", name);
 130   tab(st);
 131   int trap = trap_state();
 132   if (trap != 0) {
 133     char buf[100];
 134     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 135   }
 136   if (extra != NULL) {
 137     st->print("%s", extra);
 138   }
 139   int flags = data()->flags();
 140   if (flags != 0) {
 141     st->print("flags(%d) ", flags);
 142   }
 143 }
 144 
 145 void ProfileData::tab(outputStream* st, bool first) const {
 146   st->fill_to(first ? tab_width_one : tab_width_two);
 147 }
 148 
 149 // ==================================================================
 150 // BitData
 151 //
 152 // A BitData corresponds to a one-bit flag.  This is used to indicate
 153 // whether a checkcast bytecode has seen a null value.
 154 
 155 
 156 void BitData::print_data_on(outputStream* st, const char* extra) const {
 157   print_shared(st, "BitData", extra);
 158   st->cr();
 159 }
 160 
 161 // ==================================================================
 162 // CounterData
 163 //
 164 // A CounterData corresponds to a simple counter.
 165 
 166 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 167   print_shared(st, "CounterData", extra);
 168   st->print_cr("count(%u)", count());
 169 }
 170 
 171 // ==================================================================
 172 // JumpData
 173 //
 174 // A JumpData is used to access profiling information for a direct
 175 // branch.  It is a counter, used for counting the number of branches,
 176 // plus a data displacement, used for realigning the data pointer to
 177 // the corresponding target bci.
 178 
 179 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 180   assert(stream->bci() == bci(), "wrong pos");
 181   int target;
 182   Bytecodes::Code c = stream->code();
 183   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 184     target = stream->dest_w();
 185   } else {
 186     target = stream->dest();
 187   }
 188   int my_di = mdo->dp_to_di(dp());
 189   int target_di = mdo->bci_to_di(target);
 190   int offset = target_di - my_di;
 191   set_displacement(offset);
 192 }
 193 
 194 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 195   print_shared(st, "JumpData", extra);
 196   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 197 }
 198 
 199 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling includes the receiver
 201   int args_count = include_receiver ? 1 : 0;
 202   ResourceMark rm;
 203   SignatureStream ss(signature);
 204   args_count += ss.reference_parameter_count();
 205   args_count = MIN2(args_count, max);
 206   return args_count * per_arg_cell_count;
 207 }
 208 
 209 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 210   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 211   assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 212   const methodHandle m = stream->method();
 213   int bci = stream->bci();
 214   Bytecode_invoke inv(m, bci);
 215   int args_cell = 0;
 216   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 217     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 218   }
 219   int ret_cell = 0;
 220   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 221     ret_cell = ReturnTypeEntry::static_cell_count();
 222   }
 223   int header_cell = 0;
 224   if (args_cell + ret_cell > 0) {
 225     header_cell = header_cell_count();
 226   }
 227 
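  // Illustrative budget: an invoke profiled for both arguments and return
  // value gets header_cell_count() header cells, per_arg_cell_count cells
  // (a stack slot plus a type word) for each of the first
  // TypeProfileArgsLimit reference arguments, and ReturnTypeEntry cells
  // for the return type.  An invoke with nothing to profile gets 0 cells
  // here and falls back to a plain CounterData.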
 228   return header_cell + args_cell + ret_cell;
 229 }
 230 
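// Helper that walks a method signature and records, for up to 'max'
// reference (object or array) parameters, each one's starting stack-slot
// offset relative to the first declared argument (a receiver, if any, is
// not part of the signature); total() returns the cumulative slot size of
// all declared parameters.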
 231 class ArgumentOffsetComputer : public SignatureInfo {
 232 private:
 233   int _max;
 234   GrowableArray<int> _offsets;
 235 
 236   void set(int size, BasicType type) { _size += size; }
 237   void do_object(int begin, int end) {
 238     if (_offsets.length() < _max) {
 239       _offsets.push(_size);
 240     }
 241     SignatureInfo::do_object(begin, end);
 242   }
 243   void do_array (int begin, int end) {
 244     if (_offsets.length() < _max) {
 245       _offsets.push(_size);
 246     }
 247     SignatureInfo::do_array(begin, end);
 248   }
 249 
 250 public:
 251   ArgumentOffsetComputer(Symbol* signature, int max)
 252     : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
 253   }
 254 
 255   int total() { lazy_iterate_parameters(); return _size; }
 256 
 257   int off_at(int i) const { return _offsets.at(i); }
 258 };
 259 
 260 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
 261   ResourceMark rm;
 262   int start = 0;
  // Parameter profiling includes the receiver
 264   if (include_receiver && has_receiver) {
 265     set_stack_slot(0, 0);
 266     set_type(0, type_none());
 267     start += 1;
 268   }
 269   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
 270   aos.total();
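  // Illustrative example: for an instance method (ILjava/lang/String;J)V
  // with include_receiver == false, the loop below records the single
  // reference argument (the String) at stack slot 2 -- slot 0 is the
  // receiver and slot 1 the int -- with its type initially type_none().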
 271   for (int i = start; i < _number_of_entries; i++) {
 272     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
 273     set_type(i, type_none());
 274   }
 275 }
 276 
 277 void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 278   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 279   Bytecode_invoke inv(stream->method(), stream->bci());
 280 
 281   SignatureStream ss(inv.signature());
 282   if (has_arguments()) {
 283 #ifdef ASSERT
 284     ResourceMark rm;
 285     int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
 286     assert(count > 0, "room for args type but none found?");
 287     check_number_of_arguments(count);
 288 #endif
 289     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 290   }
 291 
 292   if (has_return()) {
 293     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 294     _ret.post_initialize();
 295   }
 296 }
 297 
 298 void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 299   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 300   Bytecode_invoke inv(stream->method(), stream->bci());
 301 
 302   if (has_arguments()) {
 303 #ifdef ASSERT
 304     ResourceMark rm;
 305     SignatureStream ss(inv.signature());
 306     int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
 307     assert(count > 0, "room for args type but none found?");
 308     check_number_of_arguments(count);
 309 #endif
 310     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 311   }
 312 
 313   if (has_return()) {
 314     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 315     _ret.post_initialize();
 316   }
 317 }
 318 
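// Type entries pack a Klass* and status bits (e.g. null_seen) into a single
// intptr_t.  The cleaners below extract the klass with klass_part() and, when
// it has to be dropped, write back a NULL klass while preserving the status
// bits via with_status().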
 319 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 320   for (int i = 0; i < _number_of_entries; i++) {
 321     intptr_t p = type(i);
 322     Klass* k = (Klass*)klass_part(p);
 323     if (k != NULL && (always_clean || !k->is_loader_alive())) {
 324       set_type(i, with_status((Klass*)NULL, p));
 325     }
 326   }
 327 }
 328 
 329 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 330   intptr_t p = type();
 331   Klass* k = (Klass*)klass_part(p);
 332   if (k != NULL && (always_clean || !k->is_loader_alive())) {
 333     set_type(with_status((Klass*)NULL, p));
 334   }
 335 }
 336 
 337 bool TypeEntriesAtCall::return_profiling_enabled() {
 338   return MethodData::profile_return();
 339 }
 340 
 341 bool TypeEntriesAtCall::arguments_profiling_enabled() {
 342   return MethodData::profile_arguments();
 343 }
 344 
 345 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 346   if (is_type_none(k)) {
 347     st->print("none");
 348   } else if (is_type_unknown(k)) {
 349     st->print("unknown");
 350   } else {
 351     valid_klass(k)->print_value_on(st);
 352   }
 353   if (was_null_seen(k)) {
 354     st->print(" (null seen)");
 355   }
 356 }
 357 
 358 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 359   for (int i = 0; i < _number_of_entries; i++) {
 360     _pd->tab(st);
 361     st->print("%d: stack(%u) ", i, stack_slot(i));
 362     print_klass(st, type(i));
 363     st->cr();
 364   }
 365 }
 366 
 367 void ReturnTypeEntry::print_data_on(outputStream* st) const {
 368   _pd->tab(st);
 369   print_klass(st, type());
 370   st->cr();
 371 }
 372 
 373 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 374   CounterData::print_data_on(st, extra);
 375   if (has_arguments()) {
 376     tab(st, true);
 377     st->print("argument types");
 378     _args.print_data_on(st);
 379   }
 380   if (has_return()) {
 381     tab(st, true);
 382     st->print("return type");
 383     _ret.print_data_on(st);
 384   }
 385 }
 386 
 387 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 388   VirtualCallData::print_data_on(st, extra);
 389   if (has_arguments()) {
 390     tab(st, true);
 391     st->print("argument types");
 392     _args.print_data_on(st);
 393   }
 394   if (has_return()) {
 395     tab(st, true);
 396     st->print("return type");
 397     _ret.print_data_on(st);
 398   }
 399 }
 400 
 401 // ==================================================================
 402 // ReceiverTypeData
 403 //
 404 // A ReceiverTypeData is used to access profiling information about a
 405 // dynamic type check.  It consists of a counter which counts the total times
 406 // that the check is reached, and a series of (Klass*, count) pairs
 407 // which are used to store a type profile for the receiver of the check.
 408 
 409 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
 411     Klass* p = receiver(row);
 412     if (p != NULL && (always_clean || !p->is_loader_alive())) {
 413       clear_row(row);
 414     }
 415   }
 416 }
 417 
 418 #if INCLUDE_JVMCI
 419 void VirtualCallData::clean_weak_klass_links(bool always_clean) {
 420   ReceiverTypeData::clean_weak_klass_links(always_clean);
 421   for (uint row = 0; row < method_row_limit(); row++) {
 422     Method* p = method(row);
 423     if (p != NULL && (always_clean || !p->method_holder()->is_loader_alive())) {
 424       clear_method_row(row);
 425     }
 426   }
 427 }
 428 
 429 void VirtualCallData::clean_weak_method_links() {
 430   ReceiverTypeData::clean_weak_method_links();
 431   for (uint row = 0; row < method_row_limit(); row++) {
 432     Method* p = method(row);
 433     if (p != NULL && p->is_old()) {
 434       clear_method_row(row);
 435     }
 436   }
 437 }
 438 #endif // INCLUDE_JVMCI
 439 
 440 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 441   uint row;
 442   int entries = 0;
 443   for (row = 0; row < row_limit(); row++) {
 444     if (receiver(row) != NULL)  entries++;
 445   }
 446 #if INCLUDE_JVMCI
 447   st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
 448 #else
 449   st->print_cr("count(%u) entries(%u)", count(), entries);
 450 #endif
 451   int total = count();
 452   for (row = 0; row < row_limit(); row++) {
 453     if (receiver(row) != NULL) {
 454       total += receiver_count(row);
 455     }
 456   }
 457   for (row = 0; row < row_limit(); row++) {
 458     if (receiver(row) != NULL) {
 459       tab(st);
 460       receiver(row)->print_value_on(st);
 461       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 462     }
 463   }
 464 }
 465 void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
 466   print_shared(st, "ReceiverTypeData", extra);
 467   print_receiver_data_on(st);
 468 }
 469 
 470 #if INCLUDE_JVMCI
 471 void VirtualCallData::print_method_data_on(outputStream* st) const {
 472   uint row;
 473   int entries = 0;
 474   for (row = 0; row < method_row_limit(); row++) {
 475     if (method(row) != NULL) entries++;
 476   }
 477   tab(st);
 478   st->print_cr("method_entries(%u)", entries);
 479   int total = count();
 480   for (row = 0; row < method_row_limit(); row++) {
 481     if (method(row) != NULL) {
 482       total += method_count(row);
 483     }
 484   }
 485   for (row = 0; row < method_row_limit(); row++) {
 486     if (method(row) != NULL) {
 487       tab(st);
 488       method(row)->print_value_on(st);
 489       st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
 490     }
 491   }
 492 }
 493 #endif // INCLUDE_JVMCI
 494 
 495 void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
 496   print_shared(st, "VirtualCallData", extra);
 497   print_receiver_data_on(st);
 498   print_method_data_on(st);
 499 }
 500 
 501 // ==================================================================
 502 // RetData
 503 //
 504 // A RetData is used to access profiling information for a ret bytecode.
 505 // It is composed of a count of the number of times that the ret has
 506 // been executed, followed by a series of triples of the form
 507 // (bci, count, di) which count the number of times that some bci was the
 508 // target of the ret and cache a corresponding displacement.
 509 
 510 void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 511   for (uint row = 0; row < row_limit(); row++) {
 512     set_bci_displacement(row, -1);
 513     set_bci(row, no_bci);
 514   }
  // Release so other threads see a consistent state.  bci is used as
  // a validity flag for bci_displacement.
 517   OrderAccess::release();
 518 }
 519 
 520 // This routine needs to atomically update the RetData structure, so the
 521 // caller needs to hold the RetData_lock before it gets here.  Since taking
 522 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 523 // wrapper around a derived oop, taking the lock in _this_ method will
 524 // basically cause the 'this' pointer's _data field to contain junk after the
 525 // lock.  We require the caller to take the lock before making the ProfileData
 526 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
 527 address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
 528   // First find the mdp which corresponds to the return bci.
 529   address mdp = h_mdo->bci_to_dp(return_bci);
 530 
 531   // Now check to see if any of the cache slots are open.
 532   for (uint row = 0; row < row_limit(); row++) {
 533     if (bci(row) == no_bci) {
 534       set_bci_displacement(row, mdp - dp());
 535       set_bci_count(row, DataLayout::counter_increment);
 536       // Barrier to ensure displacement is written before the bci; allows
 537       // the interpreter to read displacement without fear of race condition.
 538       release_set_bci(row, return_bci);
 539       break;
 540     }
 541   }
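  // If every row was already claimed by other return bcis, nothing was
  // updated above and this ret target simply goes unprofiled.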
 542   return mdp;
 543 }
 544 
 545 void RetData::print_data_on(outputStream* st, const char* extra) const {
 546   print_shared(st, "RetData", extra);
 547   uint row;
 548   int entries = 0;
 549   for (row = 0; row < row_limit(); row++) {
 550     if (bci(row) != no_bci)  entries++;
 551   }
 552   st->print_cr("count(%u) entries(%u)", count(), entries);
 553   for (row = 0; row < row_limit(); row++) {
 554     if (bci(row) != no_bci) {
 555       tab(st);
 556       st->print_cr("bci(%d: count(%u) displacement(%d))",
 557                    bci(row), bci_count(row), bci_displacement(row));
 558     }
 559   }
 560 }
 561 
 562 // ==================================================================
 563 // BranchData
 564 //
 565 // A BranchData is used to access profiling data for a two-way branch.
 566 // It consists of taken and not_taken counts as well as a data displacement
 567 // for the taken case.
 568 
 569 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 570   assert(stream->bci() == bci(), "wrong pos");
 571   int target = stream->dest();
 572   int my_di = mdo->dp_to_di(dp());
 573   int target_di = mdo->bci_to_di(target);
 574   int offset = target_di - my_di;
 575   set_displacement(offset);
 576 }
 577 
 578 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 579   print_shared(st, "BranchData", extra);
 580   st->print_cr("taken(%u) displacement(%d)",
 581                taken(), displacement());
 582   tab(st);
 583   st->print_cr("not taken(%u)", not_taken());
 584 }
 585 
 586 // ==================================================================
 587 // MultiBranchData
 588 //
 589 // A MultiBranchData is used to access profiling information for
 590 // a multi-way branch (*switch bytecodes).  It consists of a series
 591 // of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
 593 
 594 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 595   int cell_count = 0;
 596   if (stream->code() == Bytecodes::_tableswitch) {
 597     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 598     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 599   } else {
 600     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 601     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 602   }
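  // Example: a tableswitch with 3 explicit cases yields
  // 1 + per_case_cell_count * 4 cells -- one length cell plus a
  // (count, displacement) pair for each explicit case and for the default
  // target.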
 603   return cell_count;
 604 }
 605 
 606 void MultiBranchData::post_initialize(BytecodeStream* stream,
 607                                       MethodData* mdo) {
 608   assert(stream->bci() == bci(), "wrong pos");
 609   int target;
 610   int my_di;
 611   int target_di;
 612   int offset;
 613   if (stream->code() == Bytecodes::_tableswitch) {
 614     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 615     int len = sw.length();
 616     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 617     for (int count = 0; count < len; count++) {
 618       target = sw.dest_offset_at(count) + bci();
 619       my_di = mdo->dp_to_di(dp());
 620       target_di = mdo->bci_to_di(target);
 621       offset = target_di - my_di;
 622       set_displacement_at(count, offset);
 623     }
 624     target = sw.default_offset() + bci();
 625     my_di = mdo->dp_to_di(dp());
 626     target_di = mdo->bci_to_di(target);
 627     offset = target_di - my_di;
 628     set_default_displacement(offset);
 629 
 630   } else {
 631     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 632     int npairs = sw.number_of_pairs();
 633     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 634     for (int count = 0; count < npairs; count++) {
 635       LookupswitchPair pair = sw.pair_at(count);
 636       target = pair.offset() + bci();
 637       my_di = mdo->dp_to_di(dp());
 638       target_di = mdo->bci_to_di(target);
 639       offset = target_di - my_di;
 640       set_displacement_at(count, offset);
 641     }
 642     target = sw.default_offset() + bci();
 643     my_di = mdo->dp_to_di(dp());
 644     target_di = mdo->bci_to_di(target);
 645     offset = target_di - my_di;
 646     set_default_displacement(offset);
 647   }
 648 }
 649 
 650 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 651   print_shared(st, "MultiBranchData", extra);
 652   st->print_cr("default_count(%u) displacement(%d)",
 653                default_count(), default_displacement());
 654   int cases = number_of_cases();
 655   for (int i = 0; i < cases; i++) {
 656     tab(st);
 657     st->print_cr("count(%u) displacement(%d)",
 658                  count_at(i), displacement_at(i));
 659   }
 660 }
 661 
 662 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 663   print_shared(st, "ArgInfoData", extra);
 664   int nargs = number_of_args();
 665   for (int i = 0; i < nargs; i++) {
 666     st->print("  0x%x", arg_modified(i));
 667   }
 668   st->cr();
 669 }
 670 
 671 int ParametersTypeData::compute_cell_count(Method* m) {
 672   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 673     return 0;
 674   }
 675   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 676   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 677   if (obj_args > 0) {
 678     return obj_args + 1; // 1 cell for array len
 679   }
 680   return 0;
 681 }
 682 
 683 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 684   _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
 685 }
 686 
 687 bool ParametersTypeData::profiling_enabled() {
 688   return MethodData::profile_parameters();
 689 }
 690 
 691 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 692   st->print("parameter types"); // FIXME extra ignored?
 693   _parameters.print_data_on(st);
 694 }
 695 
 696 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 697   print_shared(st, "SpeculativeTrapData", extra);
 698   tab(st);
 699   method()->print_short_name(st);
 700   st->cr();
 701 }
 702 
 703 // ==================================================================
 704 // MethodData*
 705 //
 706 // A MethodData* holds information which has been collected about
 707 // a method.
 708 
 709 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
 710   int size = MethodData::compute_allocation_size_in_words(method);
 711 
 712   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
 713     MethodData(method, size, THREAD);
 714 }
 715 
 716 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
 717   if (is_client_compilation_mode_vm()) {
 718     return no_profile_data;
 719   }
 720   switch (code) {
 721   case Bytecodes::_checkcast:
 722   case Bytecodes::_instanceof:
 723   case Bytecodes::_aastore:
 724     if (TypeProfileCasts) {
 725       return ReceiverTypeData::static_cell_count();
 726     } else {
 727       return BitData::static_cell_count();
 728     }
 729   case Bytecodes::_invokespecial:
 730   case Bytecodes::_invokestatic:
 731     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 732       return variable_cell_count;
 733     } else {
 734       return CounterData::static_cell_count();
 735     }
 736   case Bytecodes::_goto:
 737   case Bytecodes::_goto_w:
 738   case Bytecodes::_jsr:
 739   case Bytecodes::_jsr_w:
 740     return JumpData::static_cell_count();
 741   case Bytecodes::_invokevirtual:
 742   case Bytecodes::_invokeinterface:
 743     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 744       return variable_cell_count;
 745     } else {
 746       return VirtualCallData::static_cell_count();
 747     }
 748   case Bytecodes::_invokedynamic:
 749     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 750       return variable_cell_count;
 751     } else {
 752       return CounterData::static_cell_count();
 753     }
 754   case Bytecodes::_ret:
 755     return RetData::static_cell_count();
 756   case Bytecodes::_ifeq:
 757   case Bytecodes::_ifne:
 758   case Bytecodes::_iflt:
 759   case Bytecodes::_ifge:
 760   case Bytecodes::_ifgt:
 761   case Bytecodes::_ifle:
 762   case Bytecodes::_if_icmpeq:
 763   case Bytecodes::_if_icmpne:
 764   case Bytecodes::_if_icmplt:
 765   case Bytecodes::_if_icmpge:
 766   case Bytecodes::_if_icmpgt:
 767   case Bytecodes::_if_icmple:
 768   case Bytecodes::_if_acmpeq:
 769   case Bytecodes::_if_acmpne:
 770   case Bytecodes::_ifnull:
 771   case Bytecodes::_ifnonnull:
 772     return BranchData::static_cell_count();
 773   case Bytecodes::_lookupswitch:
 774   case Bytecodes::_tableswitch:
 775     return variable_cell_count;
 776   default:
 777     return no_profile_data;
 778   }
 779 }
 780 
 781 // Compute the size of the profiling information corresponding to
 782 // the current bytecode.
 783 int MethodData::compute_data_size(BytecodeStream* stream) {
 784   int cell_count = bytecode_cell_count(stream->code());
 785   if (cell_count == no_profile_data) {
 786     return 0;
 787   }
 788   if (cell_count == variable_cell_count) {
 789     switch (stream->code()) {
 790     case Bytecodes::_lookupswitch:
 791     case Bytecodes::_tableswitch:
 792       cell_count = MultiBranchData::compute_cell_count(stream);
 793       break;
 794     case Bytecodes::_invokespecial:
 795     case Bytecodes::_invokestatic:
 796     case Bytecodes::_invokedynamic:
 797       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 798       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 799           profile_return_for_invoke(stream->method(), stream->bci())) {
 800         cell_count = CallTypeData::compute_cell_count(stream);
 801       } else {
 802         cell_count = CounterData::static_cell_count();
 803       }
 804       break;
 805     case Bytecodes::_invokevirtual:
 806     case Bytecodes::_invokeinterface: {
 807       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 808       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 809           profile_return_for_invoke(stream->method(), stream->bci())) {
 810         cell_count = VirtualCallTypeData::compute_cell_count(stream);
 811       } else {
 812         cell_count = VirtualCallData::static_cell_count();
 813       }
 814       break;
 815     }
 816     default:
 817       fatal("unexpected bytecode for var length profile data");
 818     }
 819   }
 820   // Note:  cell_count might be zero, meaning that there is just
 821   //        a DataLayout header, with no extra cells.
 822   assert(cell_count >= 0, "sanity");
 823   return DataLayout::compute_size_in_bytes(cell_count);
 824 }
 825 
 826 bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
 827   // Bytecodes for which we may use speculation
 828   switch (code) {
 829   case Bytecodes::_checkcast:
 830   case Bytecodes::_instanceof:
 831   case Bytecodes::_aastore:
 832   case Bytecodes::_invokevirtual:
 833   case Bytecodes::_invokeinterface:
 834   case Bytecodes::_if_acmpeq:
 835   case Bytecodes::_if_acmpne:
 836   case Bytecodes::_ifnull:
 837   case Bytecodes::_ifnonnull:
 838   case Bytecodes::_invokestatic:
 839 #ifdef COMPILER2
 840     if (is_server_compilation_mode_vm()) {
 841       return UseTypeSpeculation;
 842     }
 843 #endif
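  // Without COMPILER2, or when not running the server compiler, the cases
  // above fall through to the default and report false.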
 844   default:
 845     return false;
 846   }
 847   return false;
 848 }
 849 
 850 #if INCLUDE_JVMCI
 851 
 852 void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
 853   return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
 854 }
 855 
 856 FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
 857   memcpy(data(), speculation, speculation_len);
 858 }
 859 
 860 // A heuristic check to detect nmethods that outlive a failed speculations list.
 861 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 862   jlong head = (jlong)(address) *failed_speculations_address;
 863   if ((head & 0x1) == 0x1) {
 864     stringStream st;
 865     if (nm != NULL) {
 866       st.print("%d", nm->compile_id());
 867       Method* method = nm->method();
 868       st.print_raw("{");
 869       if (method != NULL) {
 870         method->print_name(&st);
 871       } else {
 872         const char* jvmci_name = nm->jvmci_name();
 873         if (jvmci_name != NULL) {
 874           st.print_raw(jvmci_name);
 875         }
 876       }
 877       st.print_raw("}");
 878     } else {
 879       st.print("<unknown>");
 880     }
 881     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 882   }
 883 }
 884 
 885 bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
 886   assert(failed_speculations_address != NULL, "must be");
 887   size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
 888   FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
 889   if (fs == NULL) {
 890     // no memory -> ignore failed speculation
 891     return false;
 892   }
 893 
 894   guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
 895   guarantee_failed_speculations_alive(nm, failed_speculations_address);
 896 
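  // Lock-free append: walk to the current tail and try to CAS the new node
  // into the first NULL next pointer.  If the CAS loses, another thread
  // appended concurrently, so continue walking from the node it installed.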
 897   FailedSpeculation** cursor = failed_speculations_address;
 898   do {
 899     if (*cursor == NULL) {
 900       FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
 901       if (old_fs == NULL) {
 902         // Successfully appended fs to end of the list
 903         return true;
 904       }
 905       cursor = old_fs->next_adr();
 906     } else {
 907       cursor = (*cursor)->next_adr();
 908     }
 909   } while (true);
 910 }
 911 
 912 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 913   assert(failed_speculations_address != NULL, "must be");
 914   FailedSpeculation* fs = *failed_speculations_address;
 915   while (fs != NULL) {
 916     FailedSpeculation* next = fs->next();
 917     delete fs;
 918     fs = next;
 919   }
 920 
  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This allows the check in
  // add_failed_speculation to detect additions to a freed failed
  // speculations list.
 925   long* head = (long*) failed_speculations_address;
 926   (*head) = (*head) | 0x1;
 927 }
 928 #endif // INCLUDE_JVMCI
 929 
 930 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 931 #if INCLUDE_JVMCI
 932   if (ProfileTraps) {
 933     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 934     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 935 
 936     // Make sure we have a minimum number of extra data slots to
 937     // allocate SpeculativeTrapData entries. We would want to have one
 938     // entry per compilation that inlines this method and for which
 939     // some type speculation assumption fails. So the room we need for
 940     // the SpeculativeTrapData entries doesn't directly depend on the
 941     // size of the method. Because it's hard to estimate, we reserve
 942     // space for an arbitrary number of entries.
 943     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 944       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 945 
 946     return MAX2(extra_data_count, spec_data_count);
 947   } else {
 948     return 0;
 949   }
 950 #else // INCLUDE_JVMCI
 951   if (ProfileTraps) {
 952     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 953     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 954     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 955     int one_percent_of_data
 956       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 957     if (extra_data_count < one_percent_of_data)
 958       extra_data_count = one_percent_of_data;
 959     if (extra_data_count > empty_bc_count)
 960       extra_data_count = empty_bc_count;  // no need for more
 961 
 962     // Make sure we have a minimum number of extra data slots to
 963     // allocate SpeculativeTrapData entries. We would want to have one
 964     // entry per compilation that inlines this method and for which
 965     // some type speculation assumption fails. So the room we need for
 966     // the SpeculativeTrapData entries doesn't directly depend on the
 967     // size of the method. Because it's hard to estimate, we reserve
 968     // space for an arbitrary number of entries.
 969     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 970       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 971 
 972     return MAX2(extra_data_count, spec_data_count);
 973   } else {
 974     return 0;
 975   }
 976 #endif // INCLUDE_JVMCI
 977 }
 978 
 979 // Compute the size of the MethodData* necessary to store
 980 // profiling information about a given method.  Size is in bytes.
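// The resulting layout is: the MethodData header, the per-bytecode profile
// data, a block of extra DataLayout slots (trap and speculative-trap
// entries), one ArgInfoData record for the argument-modification bits and,
// when parameter profiling is enabled, a trailing ParametersTypeData area.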
 981 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
 982   int data_size = 0;
 983   BytecodeStream stream(method);
 984   Bytecodes::Code c;
 985   int empty_bc_count = 0;  // number of bytecodes lacking data
 986   bool needs_speculative_traps = false;
 987   while ((c = stream.next()) >= 0) {
 988     int size_in_bytes = compute_data_size(&stream);
 989     data_size += size_in_bytes;
 990     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
 991     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
 992   }
 993   int object_size = in_bytes(data_offset()) + data_size;
 994 
 995   // Add some extra DataLayout cells (at least one) to track stray traps.
 996   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
 997   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
 998 
 999   // Add a cell to record information about modified arguments.
1000   int arg_size = method->size_of_parameters();
1001   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
1002 
1003   // Reserve room for an area of the MDO dedicated to profiling of
1004   // parameters
1005   int args_cell = ParametersTypeData::compute_cell_count(method());
1006   if (args_cell > 0) {
1007     object_size += DataLayout::compute_size_in_bytes(args_cell);
1008   }
1009   return object_size;
1010 }
1011 
1012 // Compute the size of the MethodData* necessary to store
1013 // profiling information about a given method.  Size is in words
1014 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1015   int byte_size = compute_allocation_size_in_bytes(method);
1016   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1017   return align_metadata_size(word_size);
1018 }
1019 
1020 // Initialize an individual data segment.  Returns the size of
1021 // the segment in bytes.
1022 int MethodData::initialize_data(BytecodeStream* stream,
1023                                        int data_index) {
1024   if (is_client_compilation_mode_vm()) {
1025     return 0;
1026   }
1027   int cell_count = -1;
1028   int tag = DataLayout::no_tag;
1029   DataLayout* data_layout = data_layout_at(data_index);
1030   Bytecodes::Code c = stream->code();
1031   switch (c) {
1032   case Bytecodes::_checkcast:
1033   case Bytecodes::_instanceof:
1034   case Bytecodes::_aastore:
1035     if (TypeProfileCasts) {
1036       cell_count = ReceiverTypeData::static_cell_count();
1037       tag = DataLayout::receiver_type_data_tag;
1038     } else {
1039       cell_count = BitData::static_cell_count();
1040       tag = DataLayout::bit_data_tag;
1041     }
1042     break;
1043   case Bytecodes::_invokespecial:
1044   case Bytecodes::_invokestatic: {
1045     int counter_data_cell_count = CounterData::static_cell_count();
1046     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1047         profile_return_for_invoke(stream->method(), stream->bci())) {
1048       cell_count = CallTypeData::compute_cell_count(stream);
1049     } else {
1050       cell_count = counter_data_cell_count;
1051     }
1052     if (cell_count > counter_data_cell_count) {
1053       tag = DataLayout::call_type_data_tag;
1054     } else {
1055       tag = DataLayout::counter_data_tag;
1056     }
1057     break;
1058   }
1059   case Bytecodes::_goto:
1060   case Bytecodes::_goto_w:
1061   case Bytecodes::_jsr:
1062   case Bytecodes::_jsr_w:
1063     cell_count = JumpData::static_cell_count();
1064     tag = DataLayout::jump_data_tag;
1065     break;
1066   case Bytecodes::_invokevirtual:
1067   case Bytecodes::_invokeinterface: {
1068     int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
1069     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1070         profile_return_for_invoke(stream->method(), stream->bci())) {
1071       cell_count = VirtualCallTypeData::compute_cell_count(stream);
1072     } else {
1073       cell_count = virtual_call_data_cell_count;
1074     }
1075     if (cell_count > virtual_call_data_cell_count) {
1076       tag = DataLayout::virtual_call_type_data_tag;
1077     } else {
1078       tag = DataLayout::virtual_call_data_tag;
1079     }
1080     break;
1081   }
1082   case Bytecodes::_invokedynamic: {
1083     // %%% should make a type profile for any invokedynamic that takes a ref argument
1084     int counter_data_cell_count = CounterData::static_cell_count();
1085     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1086         profile_return_for_invoke(stream->method(), stream->bci())) {
1087       cell_count = CallTypeData::compute_cell_count(stream);
1088     } else {
1089       cell_count = counter_data_cell_count;
1090     }
1091     if (cell_count > counter_data_cell_count) {
1092       tag = DataLayout::call_type_data_tag;
1093     } else {
1094       tag = DataLayout::counter_data_tag;
1095     }
1096     break;
1097   }
1098   case Bytecodes::_ret:
1099     cell_count = RetData::static_cell_count();
1100     tag = DataLayout::ret_data_tag;
1101     break;
1102   case Bytecodes::_ifeq:
1103   case Bytecodes::_ifne:
1104   case Bytecodes::_iflt:
1105   case Bytecodes::_ifge:
1106   case Bytecodes::_ifgt:
1107   case Bytecodes::_ifle:
1108   case Bytecodes::_if_icmpeq:
1109   case Bytecodes::_if_icmpne:
1110   case Bytecodes::_if_icmplt:
1111   case Bytecodes::_if_icmpge:
1112   case Bytecodes::_if_icmpgt:
1113   case Bytecodes::_if_icmple:
1114   case Bytecodes::_if_acmpeq:
1115   case Bytecodes::_if_acmpne:
1116   case Bytecodes::_ifnull:
1117   case Bytecodes::_ifnonnull:
1118     cell_count = BranchData::static_cell_count();
1119     tag = DataLayout::branch_data_tag;
1120     break;
1121   case Bytecodes::_lookupswitch:
1122   case Bytecodes::_tableswitch:
1123     cell_count = MultiBranchData::compute_cell_count(stream);
1124     tag = DataLayout::multi_branch_data_tag;
1125     break;
1126   default:
1127     break;
1128   }
1129   assert(tag == DataLayout::multi_branch_data_tag ||
1130          ((MethodData::profile_arguments() || MethodData::profile_return()) &&
1131           (tag == DataLayout::call_type_data_tag ||
1132            tag == DataLayout::counter_data_tag ||
1133            tag == DataLayout::virtual_call_type_data_tag ||
1134            tag == DataLayout::virtual_call_data_tag)) ||
1135          cell_count == bytecode_cell_count(c), "cell counts must agree");
1136   if (cell_count >= 0) {
1137     assert(tag != DataLayout::no_tag, "bad tag");
1138     assert(bytecode_has_profile(c), "agree w/ BHP");
1139     data_layout->initialize(tag, stream->bci(), cell_count);
1140     return DataLayout::compute_size_in_bytes(cell_count);
1141   } else {
1142     assert(!bytecode_has_profile(c), "agree w/ !BHP");
1143     return 0;
1144   }
1145 }
1146 
1147 // Get the data at an arbitrary (sort of) data index.
1148 ProfileData* MethodData::data_at(int data_index) const {
1149   if (out_of_bounds(data_index)) {
1150     return NULL;
1151   }
1152   DataLayout* data_layout = data_layout_at(data_index);
1153   return data_layout->data_in();
1154 }
1155 
1156 ProfileData* DataLayout::data_in() {
1157   switch (tag()) {
1158   case DataLayout::no_tag:
1159   default:
1160     ShouldNotReachHere();
1161     return NULL;
1162   case DataLayout::bit_data_tag:
1163     return new BitData(this);
1164   case DataLayout::counter_data_tag:
1165     return new CounterData(this);
1166   case DataLayout::jump_data_tag:
1167     return new JumpData(this);
1168   case DataLayout::receiver_type_data_tag:
1169     return new ReceiverTypeData(this);
1170   case DataLayout::virtual_call_data_tag:
1171     return new VirtualCallData(this);
1172   case DataLayout::ret_data_tag:
1173     return new RetData(this);
1174   case DataLayout::branch_data_tag:
1175     return new BranchData(this);
1176   case DataLayout::multi_branch_data_tag:
1177     return new MultiBranchData(this);
1178   case DataLayout::arg_info_data_tag:
1179     return new ArgInfoData(this);
1180   case DataLayout::call_type_data_tag:
1181     return new CallTypeData(this);
1182   case DataLayout::virtual_call_type_data_tag:
1183     return new VirtualCallTypeData(this);
1184   case DataLayout::parameters_type_data_tag:
1185     return new ParametersTypeData(this);
1186   case DataLayout::speculative_trap_data_tag:
1187     return new SpeculativeTrapData(this);
1188   }
1189 }
1190 
1191 // Iteration over data.
1192 ProfileData* MethodData::next_data(ProfileData* current) const {
1193   int current_index = dp_to_di(current->dp());
1194   int next_index = current_index + current->size_in_bytes();
1195   ProfileData* next = data_at(next_index);
1196   return next;
1197 }
1198 
1199 // Give each of the data entries a chance to perform specific
1200 // data initialization.
1201 void MethodData::post_initialize(BytecodeStream* stream) {
1202   ResourceMark rm;
1203   ProfileData* data;
1204   for (data = first_data(); is_valid(data); data = next_data(data)) {
1205     stream->set_start(data->bci());
1206     stream->next();
1207     data->post_initialize(stream, this);
1208   }
1209   if (_parameters_type_data_di != no_parameters) {
1210     parameters_type_data()->post_initialize(NULL, this);
1211   }
1212 }
1213 
1214 // Initialize the MethodData* corresponding to a given method.
1215 MethodData::MethodData(const methodHandle& method, int size, TRAPS)
1216   : _extra_data_lock(Mutex::leaf, "MDO extra data lock"),
1217     _parameters_type_data_di(parameters_uninitialized) {
1218   // Set the method back-pointer.
1219   _method = method();
1220   initialize();
1221 }
1222 
1223 void MethodData::initialize() {
1224   Thread* thread = Thread::current();
1225   NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
1226   ResourceMark rm(thread);
1227 
1228   init();
1229   set_creation_mileage(mileage_of(method()));
1230 
1231   // Go through the bytecodes and allocate and initialize the
1232   // corresponding data cells.
1233   int data_size = 0;
1234   int empty_bc_count = 0;  // number of bytecodes lacking data
1235   _data[0] = 0;  // apparently not set below.
1236   BytecodeStream stream(methodHandle(thread, method()));
1237   Bytecodes::Code c;
1238   bool needs_speculative_traps = false;
1239   while ((c = stream.next()) >= 0) {
1240     int size_in_bytes = initialize_data(&stream, data_size);
1241     data_size += size_in_bytes;
1242     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
1243     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1244   }
1245   _data_size = data_size;
1246   int object_size = in_bytes(data_offset()) + data_size;
1247 
1248   // Add some extra DataLayout cells (at least one) to track stray traps.
1249   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1250   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
1251 
1252   // Let's zero the space for the extra data
1253   Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
1254 
1255   // Add a cell to record information about modified arguments.
1256   // Set up _args_modified array after traps cells so that
1257   // the code for traps cells works.
1258   DataLayout *dp = data_layout_at(data_size + extra_size);
1259 
1260   int arg_size = method()->size_of_parameters();
1261   dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
1262 
1263   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
1264   object_size += extra_size + arg_data_size;
1265 
1266   int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserve an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled).
1272   if (parms_cell > 0) {
1273     object_size += DataLayout::compute_size_in_bytes(parms_cell);
1274     _parameters_type_data_di = data_size + extra_size + arg_data_size;
1275     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
1276     dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
1277   } else {
1278     _parameters_type_data_di = no_parameters;
1279   }
1280 
1281   // Set an initial hint. Don't use set_hint_di() because
1282   // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but it is at
  // least well-defined.
1285   _hint_di = first_di();
1286 
1287   post_initialize(&stream);
1288 
1289   assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
1290   set_size(object_size);
1291 }
1292 
1293 void MethodData::init() {
1294   _invocation_counter.init();
1295   _backedge_counter.init();
1296   _invocation_counter_start = 0;
1297   _backedge_counter_start = 0;
1298 
1299   // Set per-method invoke- and backedge mask.
1300   double scale = 1.0;
1301   methodHandle mh(Thread::current(), _method);
1302   CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
1303   _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1304   _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1305 
1306   _tenure_traps = 0;
1307   _num_loops = 0;
1308   _num_blocks = 0;
1309   _would_profile = unknown;
1310 
1311 #if INCLUDE_JVMCI
1312   _jvmci_ir_size = 0;
1313   _failed_speculations = NULL;
1314 #endif
1315 
1316 #if INCLUDE_RTM_OPT
1317   _rtm_state = NoRTM; // No RTM lock eliding by default
1318   if (UseRTMLocking &&
1319       !CompilerOracle::has_option_string(mh, "NoRTMLockEliding")) {
1320     if (CompilerOracle::has_option_string(mh, "UseRTMLockEliding") || !UseRTMDeopt) {
1321       // Generate RTM lock eliding code without abort ratio calculation code.
1322       _rtm_state = UseRTM;
1323     } else if (UseRTMDeopt) {
1324       // Generate RTM lock eliding code and include abort ratio calculation
1325       // code if UseRTMDeopt is on.
1326       _rtm_state = ProfileRTM;
1327     }
1328   }
1329 #endif
1330 
1331   // Initialize flags and trap history.
1332   _nof_decompiles = 0;
1333   _nof_overflow_recompiles = 0;
1334   _nof_overflow_traps = 0;
1335   clear_escape_info();
1336   assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
1337   Copy::zero_to_words((HeapWord*) &_trap_hist,
1338                       sizeof(_trap_hist) / sizeof(HeapWord));
1339 }
1340 
1341 // Get a measure of how much mileage the method has on it.
1342 int MethodData::mileage_of(Method* method) {
1343   int mileage = 0;
1344   if (TieredCompilation) {
1345     mileage = MAX2(method->invocation_count(), method->backedge_count());
1346   } else {
1347     int iic = method->interpreter_invocation_count();
1348     if (mileage < iic)  mileage = iic;
1349     MethodCounters* mcs = method->method_counters();
1350     if (mcs != NULL) {
1351       InvocationCounter* ic = mcs->invocation_counter();
1352       InvocationCounter* bc = mcs->backedge_counter();
1353       int icval = ic->count();
1354       if (ic->carry()) icval += CompileThreshold;
1355       if (mileage < icval)  mileage = icval;
1356       int bcval = bc->count();
1357       if (bc->carry()) bcval += CompileThreshold;
1358       if (mileage < bcval)  mileage = bcval;
1359     }
1360   }
1361   return mileage;
1362 }
1363 
1364 bool MethodData::is_mature() const {
1365   return CompilationPolicy::policy()->is_mature(_method);
1366 }
1367 
1368 // Translate a bci to its corresponding data index (di).
1369 address MethodData::bci_to_dp(int bci) {
1370   ResourceMark rm;
1371   ProfileData* data = data_before(bci);
1372   ProfileData* prev = NULL;
1373   for ( ; is_valid(data); data = next_data(data)) {
1374     if (data->bci() >= bci) {
1375       if (data->bci() == bci)  set_hint_di(dp_to_di(data->dp()));
1376       else if (prev != NULL)   set_hint_di(dp_to_di(prev->dp()));
1377       return data->dp();
1378     }
1379     prev = data;
1380   }
1381   return (address)limit_data_position();
1382 }
1383 
1384 // Translate a bci to its corresponding data, or NULL.
1385 ProfileData* MethodData::bci_to_data(int bci) {
1386   ProfileData* data = data_before(bci);
1387   for ( ; is_valid(data); data = next_data(data)) {
1388     if (data->bci() == bci) {
1389       set_hint_di(dp_to_di(data->dp()));
1390       return data;
1391     } else if (data->bci() > bci) {
1392       break;
1393     }
1394   }
1395   return bci_to_extra_data(bci, NULL, false);
1396 }
1397 
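// Step over one entry in the extra data section.  Entries come in two sizes:
// plain BitData (or still-unallocated no_tag) records and the larger
// SpeculativeTrapData records, so the stride depends on the current tag.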
1398 DataLayout* MethodData::next_extra(DataLayout* dp) {
1399   int nb_cells = 0;
1400   switch(dp->tag()) {
1401   case DataLayout::bit_data_tag:
1402   case DataLayout::no_tag:
1403     nb_cells = BitData::static_cell_count();
1404     break;
1405   case DataLayout::speculative_trap_data_tag:
1406     nb_cells = SpeculativeTrapData::static_cell_count();
1407     break;
1408   default:
1409     fatal("unexpected tag %d", dp->tag());
1410   }
1411   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1412 }
1413 
1414 ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
1415   DataLayout* end = args_data_limit();
1416 
1417   for (;; dp = next_extra(dp)) {
1418     assert(dp < end, "moved past end of extra data");
1419     // No need for "Atomic::load_acquire" ops,
1420     // since the data structure is monotonic.
1421     switch(dp->tag()) {
1422     case DataLayout::no_tag:
1423       return NULL;
1424     case DataLayout::arg_info_data_tag:
1425       dp = end;
1426       return NULL; // ArgInfoData is at the end of extra data section.
1427     case DataLayout::bit_data_tag:
1428       if (m == NULL && dp->bci() == bci) {
1429         return new BitData(dp);
1430       }
1431       break;
1432     case DataLayout::speculative_trap_data_tag:
1433       if (m != NULL) {
1434         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1435         // data->method() may be null in case of a concurrent
1436         // allocation. Maybe it's for the same method. Try to use that
1437         // entry in that case.
1438         if (dp->bci() == bci) {
1439           if (data->method() == NULL) {
1440             assert(concurrent, "impossible because no concurrent allocation");
1441             return NULL;
1442           } else if (data->method() == m) {
1443             return data;
1444           }
1445         }
1446       }
1447       break;
1448     default:
1449       fatal("unexpected tag %d", dp->tag());
1450     }
1451   }
1452   return NULL;
1453 }
1454 
1455 
1456 // Translate a bci to its corresponding extra data, or NULL.
1457 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1458   // This code assumes an entry for a SpeculativeTrapData is 2 cells (twice the size of a BitData entry).
1459   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1460          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1461          "code needs to be adjusted");
1462 
1463   // Do not create one of these if the method has been redefined.
1464   if (m != NULL && m->is_old()) {
1465     return NULL;
1466   }
1467 
1468   DataLayout* dp  = extra_data_base();
1469   DataLayout* end = args_data_limit();
1470 
1471   // Allocation in the extra data space has to be atomic because not
1472   // all entries have the same size and non-atomic concurrent
1473   // allocation would result in a corrupted extra data space.
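       // The allocation is therefore done in two phases: a lock-free scan
       // first (concurrent == true); only if that finds nothing and an entry
       // may be created do we rescan under _extra_data_lock and publish the
       // new entry into the free slot found there.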
1474   ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
1475   if (result != NULL) {
1476     return result;
1477   }
1478 
1479   if (create_if_missing && dp < end) {
1480     MutexLocker ml(&_extra_data_lock);
1481     // Check again now that we have the lock. Another thread may
1482     // have added extra data entries.
1483     ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
1484     if (result != NULL || dp >= end) {
1485       return result;
1486     }
1487 
1488     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
1489     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1490     u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1491     // SpeculativeTrapData is 2 slots. Make sure we have room.
1492     if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
1493       return NULL;
1494     }
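         // Build the header in a local DataLayout and publish it with a single
         // header store, so a concurrent lock-free scan sees either no_tag or
         // a fully formed header for this entry.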
1495     DataLayout temp;
1496     temp.initialize(tag, bci, 0);
1497 
1498     dp->set_header(temp.header());
1499     assert(dp->tag() == tag, "sane");
1500     assert(dp->bci() == bci, "no concurrent allocation");
1501     if (tag == DataLayout::bit_data_tag) {
1502       return new BitData(dp);
1503     } else {
1504       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1505       data->set_method(m);
1506       return data;
1507     }
1508   }
1509   return NULL;
1510 }
1511 
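     // Return the ArgInfoData record, which always sits at the end of the
     // extra data section, or NULL if this MethodData does not have one.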
1512 ArgInfoData *MethodData::arg_info() {
1513   DataLayout* dp    = extra_data_base();
1514   DataLayout* end   = args_data_limit();
1515   for (; dp < end; dp = next_extra(dp)) {
1516     if (dp->tag() == DataLayout::arg_info_data_tag)
1517       return new ArgInfoData(dp);
1518   }
1519   return NULL;
1520 }
1521 
1522 // Printing
1523 
1524 void MethodData::print_on(outputStream* st) const {
1525   assert(is_methodData(), "should be method data");
1526   st->print("method data for ");
1527   method()->print_value_on(st);
1528   st->cr();
1529   print_data_on(st);
1530 }
1531 
1532 void MethodData::print_value_on(outputStream* st) const {
1533   assert(is_methodData(), "should be method data");
1534   st->print("method data for ");
1535   method()->print_value_on(st);
1536 }
1537 
1538 void MethodData::print_data_on(outputStream* st) const {
1539   ResourceMark rm;
1540   ProfileData* data = first_data();
1541   if (_parameters_type_data_di != no_parameters) {
1542     parameters_type_data()->print_data_on(st);
1543   }
1544   for ( ; is_valid(data); data = next_data(data)) {
1545     st->print("%d", dp_to_di(data->dp()));
1546     st->fill_to(6);
1547     data->print_data_on(st, this);
1548   }
1549   st->print_cr("--- Extra data:");
1550   DataLayout* dp    = extra_data_base();
1551   DataLayout* end   = args_data_limit();
1552   for (;; dp = next_extra(dp)) {
1553     assert(dp < end, "moved past end of extra data");
1554     // No need for "Atomic::load_acquire" ops,
1555     // since the data structure is monotonic.
1556     switch(dp->tag()) {
1557     case DataLayout::no_tag:
1558       continue;
1559     case DataLayout::bit_data_tag:
1560       data = new BitData(dp);
1561       break;
1562     case DataLayout::speculative_trap_data_tag:
1563       data = new SpeculativeTrapData(dp);
1564       break;
1565     case DataLayout::arg_info_data_tag:
1566       data = new ArgInfoData(dp);
1567       dp = end; // ArgInfoData is at the end of the extra data section.
1568       break;
1569     default:
1570       fatal("unexpected tag %d", dp->tag());
1571     }
1572     st->print("%d", dp_to_di(data->dp()));
1573     st->fill_to(6);
1574     data->print_data_on(st);
1575     if (dp >= end) return;
1576   }
1577 }
1578 
1579 #if INCLUDE_SERVICES
1580 // Size Statistics
1581 void MethodData::collect_statistics(KlassSizeStats *sz) const {
1582   int n = sz->count(this);
1583   sz->_method_data_bytes += n;
1584   sz->_method_all_bytes += n;
1585   sz->_rw_bytes += n;
1586 }
1587 #endif // INCLUDE_SERVICES
1588 
1589 // Verification
1590 
1591 void MethodData::verify_on(outputStream* st) {
1592   guarantee(is_methodData(), "object must be method data");
1593   // guarantee(m->is_perm(), "should be in permspace");
1594   this->verify_data_on(st);
1595 }
1596 
1597 void MethodData::verify_data_on(outputStream* st) {
1598   NEEDS_CLEANUP;
1599   // not yet implemented.
1600 }
1601 
1602 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1603   if (m->is_compiled_lambda_form()) {
1604     return true;
1605   }
1606 
1607   Bytecode_invoke inv(m, bci);
1608   return inv.is_invokedynamic() || inv.is_invokehandle();
1609 }
1610 
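     // Returns true for invokevirtual calls to Unsafe get*/put* methods
     // (jdk.internal.misc.Unsafe or sun.misc.Unsafe), so that the arguments of
     // these accessors are type-profiled as well.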
1611 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1612   Bytecode_invoke inv(m, bci);
1613   if (inv.is_invokevirtual()) {
1614     if (inv.klass() == vmSymbols::jdk_internal_misc_Unsafe() ||
1615         inv.klass() == vmSymbols::sun_misc_Unsafe()) {
1616       ResourceMark rm;
1617       char* name = inv.name()->as_C_string();
1618       if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
1619         return true;
1620       }
1621     }
1622   }
1623   return false;
1624 }
1625 
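     // TypeProfileLevel is decoded one decimal digit at a time: the ones digit
     // controls argument profiling, the tens digit return value profiling, and
     // the hundreds digit parameter profiling (see the *_flag() helpers below).
     // For example, assuming type_profile_jsr292 == 1 and type_profile_all == 2
     // as declared in methodData.hpp, TypeProfileLevel == 122 profiles arguments
     // and return values at every call site but parameters only for JSR292
     // (method handle) code.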
1626 int MethodData::profile_arguments_flag() {
1627   return TypeProfileLevel % 10;
1628 }
1629 
1630 bool MethodData::profile_arguments() {
1631   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
1632 }
1633 
1634 bool MethodData::profile_arguments_jsr292_only() {
1635   return profile_arguments_flag() == type_profile_jsr292;
1636 }
1637 
1638 bool MethodData::profile_all_arguments() {
1639   return profile_arguments_flag() == type_profile_all;
1640 }
1641 
1642 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1643   if (!profile_arguments()) {
1644     return false;
1645   }
1646 
1647   if (profile_all_arguments()) {
1648     return true;
1649   }
1650 
1651   if (profile_unsafe(m, bci)) {
1652     return true;
1653   }
1654 
1655   assert(profile_arguments_jsr292_only(), "inconsistent");
1656   return profile_jsr292(m, bci);
1657 }
1658 
1659 int MethodData::profile_return_flag() {
1660   return (TypeProfileLevel % 100) / 10;
1661 }
1662 
1663 bool MethodData::profile_return() {
1664   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1665 }
1666 
1667 bool MethodData::profile_return_jsr292_only() {
1668   return profile_return_flag() == type_profile_jsr292;
1669 }
1670 
1671 bool MethodData::profile_all_return() {
1672   return profile_return_flag() == type_profile_all;
1673 }
1674 
1675 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1676   if (!profile_return()) {
1677     return false;
1678   }
1679 
1680   if (profile_all_return()) {
1681     return true;
1682   }
1683 
1684   assert(profile_return_jsr292_only(), "inconsistent");
1685   return profile_jsr292(m, bci);
1686 }
1687 
1688 int MethodData::profile_parameters_flag() {
1689   return TypeProfileLevel / 100;
1690 }
1691 
1692 bool MethodData::profile_parameters() {
1693   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1694 }
1695 
1696 bool MethodData::profile_parameters_jsr292_only() {
1697   return profile_parameters_flag() == type_profile_jsr292;
1698 }
1699 
1700 bool MethodData::profile_all_parameters() {
1701   return profile_parameters_flag() == type_profile_all;
1702 }
1703 
1704 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1705   if (!profile_parameters()) {
1706     return false;
1707   }
1708 
1709   if (profile_all_parameters()) {
1710     return true;
1711   }
1712 
1713   assert(profile_parameters_jsr292_only(), "inconsistent");
1714   return m->is_compiled_lambda_form();
1715 }
1716 
1717 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1718   log_trace(cds)("Iter(MethodData): %p", this);
1719   it->push(&_method);
1720 }
1721 
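     // Compaction helper for the extra data section: with reset == false, copy
     // the cells of the entry at dp "shift" cells to the left; with reset ==
     // true, zero the "shift" cells that end at dp (the stale tail left behind
     // by the copies).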
1722 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1723   if (shift == 0) {
1724     return;
1725   }
1726   if (!reset) {
1727     // Move all cells of the trap entry at dp left by "shift" cells
1728     intptr_t* start = (intptr_t*)dp;
1729     intptr_t* end = (intptr_t*)next_extra(dp);
1730     for (intptr_t* ptr = start; ptr < end; ptr++) {
1731       *(ptr-shift) = *ptr;
1732     }
1733   } else {
1734     // Reset "shift" cells stopping at dp
1735     intptr_t* start = ((intptr_t*)dp) - shift;
1736     intptr_t* end = (intptr_t*)dp;
1737     for (intptr_t* ptr = start; ptr < end; ptr++) {
1738       *ptr = 0;
1739     }
1740   }
1741 }
1742 
1743 // Check for entries that reference an unloaded method
1744 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1745   bool _always_clean;
1746 public:
1747   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1748   bool is_live(Method* m) {
1749     return !(_always_clean) && m->method_holder()->is_loader_alive();
1750   }
1751 };
1752 
1753 // Check for entries that reference a redefined method
1754 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1755 public:
1756   CleanExtraDataMethodClosure() {}
1757   bool is_live(Method* m) { return !m->is_old(); }
1758 };
1759 
1760 
1761 // Remove SpeculativeTrapData entries that reference an unloaded or
1762 // redefined method
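     // For example, with extra data [trap T1 (dead), trap T2 (live), BitData B,
     // free slots...], T1's two cells are accumulated into "shift", T2 and B
     // are each copied two cells to the left, and the two stale cells that now
     // precede the first free slot are zeroed by the final reset pass.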
1763 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1764   DataLayout* dp  = extra_data_base();
1765   DataLayout* end = args_data_limit();
1766 
1767   int shift = 0;
1768   for (; dp < end; dp = next_extra(dp)) {
1769     switch(dp->tag()) {
1770     case DataLayout::speculative_trap_data_tag: {
1771       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1772       Method* m = data->method();
1773       assert(m != NULL, "should have a method");
1774       if (!cl->is_live(m)) {
1775         // "shift" accumulates the number of cells for dead
1776         // SpeculativeTrapData entries that have been seen so
1777         // far. Following entries must be shifted left by that many
1778         // cells to remove the dead SpeculativeTrapData entries.
1779         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1780       } else {
1781         // Shift this entry left if it follows dead
1782         // SpeculativeTrapData entries
1783         clean_extra_data_helper(dp, shift);
1784       }
1785       break;
1786     }
1787     case DataLayout::bit_data_tag:
1788       // Shift this entry left if it follows dead SpeculativeTrapData
1789       // entries
1790       clean_extra_data_helper(dp, shift);
1791       continue;
1792     case DataLayout::no_tag:
1793     case DataLayout::arg_info_data_tag:
1794       // We are at the end of the live trap entries. The previous "shift"
1795       // cells contain entries that are either dead or were shifted
1796       // left. They need to be reset to no_tag.
1797       clean_extra_data_helper(dp, shift, true);
1798       return;
1799     default:
1800       fatal("unexpected tag %d", dp->tag());
1801     }
1802   }
1803 }
1804 
1805 // Verify there's no unloaded or redefined method referenced by a
1806 // SpeculativeTrapData entry
1807 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1808 #ifdef ASSERT
1809   DataLayout* dp  = extra_data_base();
1810   DataLayout* end = args_data_limit();
1811 
1812   for (; dp < end; dp = next_extra(dp)) {
1813     switch(dp->tag()) {
1814     case DataLayout::speculative_trap_data_tag: {
1815       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1816       Method* m = data->method();
1817       assert(m != NULL && cl->is_live(m), "Method should exist");
1818       break;
1819     }
1820     case DataLayout::bit_data_tag:
1821       continue;
1822     case DataLayout::no_tag:
1823     case DataLayout::arg_info_data_tag:
1824       return;
1825     default:
1826       fatal("unexpected tag %d", dp->tag());
1827     }
1828   }
1829 #endif
1830 }
1831 
1832 void MethodData::clean_method_data(bool always_clean) {
1833   ResourceMark rm;
1834   for (ProfileData* data = first_data();
1835        is_valid(data);
1836        data = next_data(data)) {
1837     data->clean_weak_klass_links(always_clean);
1838   }
1839   ParametersTypeData* parameters = parameters_type_data();
1840   if (parameters != NULL) {
1841     parameters->clean_weak_klass_links(always_clean);
1842   }
1843 
1844   CleanExtraDataKlassClosure cl(always_clean);
1845   clean_extra_data(&cl);
1846   verify_extra_data_clean(&cl);
1847 }
1848 
1849 // This is called during redefinition, for each method's MethodData, to
1850 // clean out references to "old" (redefined) methods.
1851 void MethodData::clean_weak_method_links() {
1852   ResourceMark rm;
1853   for (ProfileData* data = first_data();
1854        is_valid(data);
1855        data = next_data(data)) {
1856     data->clean_weak_method_links();
1857   }
1858 
1859   CleanExtraDataMethodClosure cl;
1860   clean_extra_data(&cl);
1861   verify_extra_data_clean(&cl);
1862 }
1863 
1864 #ifdef ASSERT
1865 void MethodData::verify_clean_weak_method_links() {
1866   ResourceMark rm;
1867   for (ProfileData* data = first_data();
1868        is_valid(data);
1869        data = next_data(data)) {
1870     data->verify_clean_weak_method_links();
1871   }
1872 
1873   CleanExtraDataMethodClosure cl;
1874   verify_extra_data_clean(&cl);
1875 }
1876 #endif // ASSERT