1 /*
   2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OOPS_METHODDATAOOP_HPP
  26 #define SHARE_VM_OOPS_METHODDATAOOP_HPP
  27 
  28 #include "interpreter/bytecodes.hpp"
  29 #include "memory/universe.hpp"
  30 #include "oops/method.hpp"
  31 #include "oops/oop.hpp"
  32 #include "utilities/align.hpp"
  33 #if INCLUDE_JVMCI
  34 #include "jvmci/jvmci_globals.hpp"
  35 #endif
  36 
  37 class BytecodeStream;
  38 class KlassSizeStats;
  39 
  40 // The MethodData object collects counts and other profile information
  41 // during zeroth-tier (interpretive) and first-tier execution.
  42 // The profile is used later by compilation heuristics.  Some heuristics
  43 // enable use of aggressive (or "heroic") optimizations.  An aggressive
  44 // optimization often has a down-side, a corner case that it handles
  45 // poorly, but which is thought to be rare.  The profile provides
  46 // evidence of this rarity for a given method or even BCI.  It allows
  47 // the compiler to back out of the optimization at places where it
  48 // has historically been a poor choice.  Other heuristics try to use
  49 // specific information gathered about types observed at a given site.
  50 //
  51 // All data in the profile is approximate.  It is expected to be accurate
 * on the whole, but the system expects occasional inaccuracies, due to
  53 // counter overflow, multiprocessor races during data collection, space
  54 // limitations, missing MDO blocks, etc.  Bad or missing data will degrade
  55 // optimization quality but will not affect correctness.  Also, each MDO
  56 // is marked with its birth-date ("creation_mileage") which can be used
  57 // to assess the quality ("maturity") of its data.
  58 //
  59 // Short (<32-bit) counters are designed to overflow to a known "saturated"
  60 // state.  Also, certain recorded per-BCI events are given one-bit counters
 * which overflow to a saturated state that applies to all counters at
  62 // that BCI.  In other words, there is a small lattice which approximates
  63 // the ideal of an infinite-precision counter for each event at each BCI,
  64 // and the lattice quickly "bottoms out" in a state where all counters
  65 // are taken to be indefinitely large.
  66 //
  67 // The reader will find many data races in profile gathering code, starting
  68 // with invocation counter incrementation.  None of these races harm correct
  69 // execution of the compiled code.
  70 
  71 // forward decl
  72 class ProfileData;
  73 
  74 // DataLayout
  75 //
  76 // Overlay for generic profiling data.
  77 class DataLayout VALUE_OBJ_CLASS_SPEC {
  78   friend class VMStructs;
  79   friend class JVMCIVMStructs;
  80 
  81 private:
  82   // Every data layout begins with a header.  This header
  83   // contains a tag, which is used to indicate the size/layout
  84   // of the data, 4 bits of flags, which can be used in any way,
  85   // 4 bits of trap history (none/one reason/many reasons),
  86   // and a bci, which is used to tie this piece of data to a
  87   // specific bci in the bytecodes.
  88   union {
  89     intptr_t _bits;
  90     struct {
  91       u1 _tag;
  92       u1 _flags;
  93       u2 _bci;
  94     } _struct;
  95   } _header;
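
  // For illustration, on a typical 64-bit build: the whole header fits in a
  // single intptr_t cell, with _tag at byte offset 0, _flags at offset 1 and
  // _bci at offsets 2..3 of the embedded struct; the remaining bytes of the
  // word are unused.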
  96 
  97   // The data layout has an arbitrary number of cells, each sized
  // to accommodate a pointer or an integer.
  99   intptr_t _cells[1];
 100 
 101   // Some types of data layouts need a length field.
 102   static bool needs_array_len(u1 tag);
 103 
 104 public:
 105   enum {
 106     counter_increment = 1
 107   };
 108 
 109   enum {
 110     cell_size = sizeof(intptr_t)
 111   };
 112 
 113   // Tag values
 114   enum {
 115     no_tag,
 116     bit_data_tag,
 117     counter_data_tag,
 118     jump_data_tag,
 119     receiver_type_data_tag,
 120     virtual_call_data_tag,
 121     ret_data_tag,
 122     branch_data_tag,
 123     multi_branch_data_tag,
 124     arg_info_data_tag,
 125     call_type_data_tag,
 126     virtual_call_type_data_tag,
 127     parameters_type_data_tag,
 128     speculative_trap_data_tag
 129   };
 130 
 131   enum {
 132     // The _struct._flags word is formatted as [trap_state:4 | flags:4].
 133     // The trap state breaks down further as [recompile:1 | reason:3].
 134     // This further breakdown is defined in deoptimization.cpp.
 135     // See Deoptimization::trap_state_reason for an assert that
 136     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 137     //
 138     // The trap_state is collected only if ProfileTraps is true.
 139     trap_bits = 1+3,  // 3: enough to distinguish [0..Reason_RECORDED_LIMIT].
 140     trap_shift = BitsPerByte - trap_bits,
 141     trap_mask = right_n_bits(trap_bits),
 142     trap_mask_in_place = (trap_mask << trap_shift),
 143     flag_limit = trap_shift,
 144     flag_mask = right_n_bits(flag_limit),
 145     first_flag = 0
 146   };
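
  // For illustration, assuming BitsPerByte == 8: trap_shift == 4,
  // trap_mask == 0xF, trap_mask_in_place == 0xF0 and flag_mask == 0x0F, so a
  // _flags byte of 0x53 encodes trap_state 0x5 (recompile bit clear, reason 5)
  // together with flag bits 0 and 1 set.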
 147 
 148   // Size computation
 149   static int header_size_in_bytes() {
 150     return cell_size;
 151   }
 152   static int header_size_in_cells() {
 153     return 1;
 154   }
 155 
 156   static int compute_size_in_bytes(int cell_count) {
 157     return header_size_in_bytes() + cell_count * cell_size;
 158   }
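
  // For illustration: on a 64-bit VM cell_size == 8, so a layout with two
  // cells occupies compute_size_in_bytes(2) == 8 + 2 * 8 == 24 bytes.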
 159 
 160   // Initialization
 161   void initialize(u1 tag, u2 bci, int cell_count);
 162 
 163   // Accessors
 164   u1 tag() {
 165     return _header._struct._tag;
 166   }
 167 
 168   // Return a few bits of trap state.  Range is [0..trap_mask].
 169   // The state tells if traps with zero, one, or many reasons have occurred.
 170   // It also tells whether zero or many recompilations have occurred.
 171   // The associated trap histogram in the MDO itself tells whether
 172   // traps are common or not.  If a BCI shows that a trap X has
 173   // occurred, and the MDO shows N occurrences of X, we make the
 174   // simplifying assumption that all N occurrences can be blamed
 175   // on that BCI.
 176   int trap_state() const {
 177     return ((_header._struct._flags >> trap_shift) & trap_mask);
 178   }
 179 
 180   void set_trap_state(int new_state) {
 181     assert(ProfileTraps, "used only under +ProfileTraps");
 182     uint old_flags = (_header._struct._flags & flag_mask);
 183     _header._struct._flags = (new_state << trap_shift) | old_flags;
 184   }
 185 
 186   u1 flags() const {
 187     return _header._struct._flags;
 188   }
 189 
 190   u2 bci() const {
 191     return _header._struct._bci;
 192   }
 193 
 194   void set_header(intptr_t value) {
 195     _header._bits = value;
 196   }
 197   intptr_t header() {
 198     return _header._bits;
 199   }
 200   void set_cell_at(int index, intptr_t value) {
 201     _cells[index] = value;
 202   }
 203   void release_set_cell_at(int index, intptr_t value);
 204   intptr_t cell_at(int index) const {
 205     return _cells[index];
 206   }
 207 
 208   void set_flag_at(int flag_number) {
 209     assert(flag_number < flag_limit, "oob");
 210     _header._struct._flags |= (0x1 << flag_number);
 211   }
 212   bool flag_at(int flag_number) const {
 213     assert(flag_number < flag_limit, "oob");
 214     return (_header._struct._flags & (0x1 << flag_number)) != 0;
 215   }
 216 
 217   // Low-level support for code generation.
 218   static ByteSize header_offset() {
 219     return byte_offset_of(DataLayout, _header);
 220   }
 221   static ByteSize tag_offset() {
 222     return byte_offset_of(DataLayout, _header._struct._tag);
 223   }
 224   static ByteSize flags_offset() {
 225     return byte_offset_of(DataLayout, _header._struct._flags);
 226   }
 227   static ByteSize bci_offset() {
 228     return byte_offset_of(DataLayout, _header._struct._bci);
 229   }
 230   static ByteSize cell_offset(int index) {
 231     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 232   }
 233 #ifdef CC_INTERP
 234   static int cell_offset_in_bytes(int index) {
 235     return (int)offset_of(DataLayout, _cells[index]);
 236   }
 237 #endif // CC_INTERP
 238   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 239   static int flag_number_to_byte_constant(int flag_number) {
 240     assert(0 <= flag_number && flag_number < flag_limit, "oob");
 241     DataLayout temp; temp.set_header(0);
 242     temp.set_flag_at(flag_number);
 243     return temp._header._struct._flags;
 244   }
 245   // Return a value which, when or-ed as a word into _header, sets the flag.
 246   static intptr_t flag_mask_to_header_mask(int byte_constant) {
 247     DataLayout temp; temp.set_header(0);
 248     temp._header._struct._flags = byte_constant;
 249     return temp._header._bits;
 250   }
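
  // For illustration: BitData::null_seen_byte_constant() below is built with
  // flag_number_to_byte_constant(), and the result can be widened with
  // flag_mask_to_header_mask() when a flag is to be or-ed into the whole
  // header word rather than into the single _flags byte.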
 251 
 252   ProfileData* data_in();
 253 
 254   // GC support
 255   void clean_weak_klass_links(BoolObjectClosure* cl);
 256 
 257   // Redefinition support
 258   void clean_weak_method_links();
 259   DEBUG_ONLY(void verify_clean_weak_method_links();)
 260 };
 261 
 262 
 263 // ProfileData class hierarchy
 264 class ProfileData;
 265 class   BitData;
 266 class     CounterData;
 267 class       ReceiverTypeData;
 268 class         VirtualCallData;
 269 class           VirtualCallTypeData;
 270 class       RetData;
 271 class       CallTypeData;
 272 class   JumpData;
 273 class     BranchData;
 274 class   ArrayData;
 275 class     MultiBranchData;
 276 class     ArgInfoData;
 277 class     ParametersTypeData;
 278 class   SpeculativeTrapData;
 279 
 280 // ProfileData
 281 //
 282 // A ProfileData object is created to refer to a section of profiling
 283 // data in a structured way.
 284 class ProfileData : public ResourceObj {
 285   friend class TypeEntries;
 286   friend class ReturnTypeEntry;
 287   friend class TypeStackSlotEntries;
 288 private:
 289   enum {
 290     tab_width_one = 16,
 291     tab_width_two = 36
 292   };
 293 
 294   // This is a pointer to a section of profiling data.
 295   DataLayout* _data;
 296 
 297   char* print_data_on_helper(const MethodData* md) const;
 298 
 299 protected:
 300   DataLayout* data() { return _data; }
 301   const DataLayout* data() const { return _data; }
 302 
 303   enum {
 304     cell_size = DataLayout::cell_size
 305   };
 306 
 307 public:
 308   // How many cells are in this?
 309   virtual int cell_count() const {
 310     ShouldNotReachHere();
 311     return -1;
 312   }
 313 
 314   // Return the size of this data.
 315   int size_in_bytes() {
 316     return DataLayout::compute_size_in_bytes(cell_count());
 317   }
 318 
 319 protected:
 320   // Low-level accessors for underlying data
 321   void set_intptr_at(int index, intptr_t value) {
 322     assert(0 <= index && index < cell_count(), "oob");
 323     data()->set_cell_at(index, value);
 324   }
 325   void release_set_intptr_at(int index, intptr_t value);
 326   intptr_t intptr_at(int index) const {
 327     assert(0 <= index && index < cell_count(), "oob");
 328     return data()->cell_at(index);
 329   }
 330   void set_uint_at(int index, uint value) {
 331     set_intptr_at(index, (intptr_t) value);
 332   }
 333   void release_set_uint_at(int index, uint value);
 334   uint uint_at(int index) const {
 335     return (uint)intptr_at(index);
 336   }
 337   void set_int_at(int index, int value) {
 338     set_intptr_at(index, (intptr_t) value);
 339   }
 340   void release_set_int_at(int index, int value);
 341   int int_at(int index) const {
 342     return (int)intptr_at(index);
 343   }
 344   int int_at_unchecked(int index) const {
 345     return (int)data()->cell_at(index);
 346   }
 347   void set_oop_at(int index, oop value) {
 348     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 349   }
 350   oop oop_at(int index) const {
 351     return cast_to_oop(intptr_at(index));
 352   }
 353 
 354   void set_flag_at(int flag_number) {
 355     data()->set_flag_at(flag_number);
 356   }
 357   bool flag_at(int flag_number) const {
 358     return data()->flag_at(flag_number);
 359   }
 360 
 361   // two convenient imports for use by subclasses:
 362   static ByteSize cell_offset(int index) {
 363     return DataLayout::cell_offset(index);
 364   }
 365   static int flag_number_to_byte_constant(int flag_number) {
 366     return DataLayout::flag_number_to_byte_constant(flag_number);
 367   }
 368 
 369   ProfileData(DataLayout* data) {
 370     _data = data;
 371   }
 372 
 373 #ifdef CC_INTERP
 374   // Static low level accessors for DataLayout with ProfileData's semantics.
 375 
 376   static int cell_offset_in_bytes(int index) {
 377     return DataLayout::cell_offset_in_bytes(index);
 378   }
 379 
 380   static void increment_uint_at_no_overflow(DataLayout* layout, int index,
 381                                             int inc = DataLayout::counter_increment) {
 382     uint count = ((uint)layout->cell_at(index)) + inc;
 383     if (count == 0) return;
 384     layout->set_cell_at(index, (intptr_t) count);
 385   }
 386 
 387   static int int_at(DataLayout* layout, int index) {
 388     return (int)layout->cell_at(index);
 389   }
 390 
 391   static int uint_at(DataLayout* layout, int index) {
 392     return (uint)layout->cell_at(index);
 393   }
 394 
 395   static oop oop_at(DataLayout* layout, int index) {
 396     return cast_to_oop(layout->cell_at(index));
 397   }
 398 
 399   static void set_intptr_at(DataLayout* layout, int index, intptr_t value) {
 400     layout->set_cell_at(index, (intptr_t) value);
 401   }
 402 
 403   static void set_flag_at(DataLayout* layout, int flag_number) {
 404     layout->set_flag_at(flag_number);
 405   }
 406 #endif // CC_INTERP
 407 
 408 public:
 409   // Constructor for invalid ProfileData.
 410   ProfileData();
 411 
 412   u2 bci() const {
 413     return data()->bci();
 414   }
 415 
 416   address dp() {
 417     return (address)_data;
 418   }
 419 
 420   int trap_state() const {
 421     return data()->trap_state();
 422   }
 423   void set_trap_state(int new_state) {
 424     data()->set_trap_state(new_state);
 425   }
 426 
 427   // Type checking
 428   virtual bool is_BitData()         const { return false; }
 429   virtual bool is_CounterData()     const { return false; }
 430   virtual bool is_JumpData()        const { return false; }
 431   virtual bool is_ReceiverTypeData()const { return false; }
 432   virtual bool is_VirtualCallData() const { return false; }
 433   virtual bool is_RetData()         const { return false; }
 434   virtual bool is_BranchData()      const { return false; }
 435   virtual bool is_ArrayData()       const { return false; }
 436   virtual bool is_MultiBranchData() const { return false; }
 437   virtual bool is_ArgInfoData()     const { return false; }
 438   virtual bool is_CallTypeData()    const { return false; }
 439   virtual bool is_VirtualCallTypeData()const { return false; }
 440   virtual bool is_ParametersTypeData() const { return false; }
 441   virtual bool is_SpeculativeTrapData()const { return false; }
 442 
 443 
 444   BitData* as_BitData() const {
 445     assert(is_BitData(), "wrong type");
 446     return is_BitData()         ? (BitData*)        this : NULL;
 447   }
 448   CounterData* as_CounterData() const {
 449     assert(is_CounterData(), "wrong type");
 450     return is_CounterData()     ? (CounterData*)    this : NULL;
 451   }
 452   JumpData* as_JumpData() const {
 453     assert(is_JumpData(), "wrong type");
 454     return is_JumpData()        ? (JumpData*)       this : NULL;
 455   }
 456   ReceiverTypeData* as_ReceiverTypeData() const {
 457     assert(is_ReceiverTypeData(), "wrong type");
 458     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
 459   }
 460   VirtualCallData* as_VirtualCallData() const {
 461     assert(is_VirtualCallData(), "wrong type");
 462     return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
 463   }
 464   RetData* as_RetData() const {
 465     assert(is_RetData(), "wrong type");
 466     return is_RetData()         ? (RetData*)        this : NULL;
 467   }
 468   BranchData* as_BranchData() const {
 469     assert(is_BranchData(), "wrong type");
 470     return is_BranchData()      ? (BranchData*)     this : NULL;
 471   }
 472   ArrayData* as_ArrayData() const {
 473     assert(is_ArrayData(), "wrong type");
 474     return is_ArrayData()       ? (ArrayData*)      this : NULL;
 475   }
 476   MultiBranchData* as_MultiBranchData() const {
 477     assert(is_MultiBranchData(), "wrong type");
 478     return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
 479   }
 480   ArgInfoData* as_ArgInfoData() const {
 481     assert(is_ArgInfoData(), "wrong type");
 482     return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
 483   }
 484   CallTypeData* as_CallTypeData() const {
 485     assert(is_CallTypeData(), "wrong type");
 486     return is_CallTypeData() ? (CallTypeData*)this : NULL;
 487   }
 488   VirtualCallTypeData* as_VirtualCallTypeData() const {
 489     assert(is_VirtualCallTypeData(), "wrong type");
 490     return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
 491   }
 492   ParametersTypeData* as_ParametersTypeData() const {
 493     assert(is_ParametersTypeData(), "wrong type");
 494     return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
 495   }
 496   SpeculativeTrapData* as_SpeculativeTrapData() const {
 497     assert(is_SpeculativeTrapData(), "wrong type");
 498     return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
 499   }
 500 
 501 
 502   // Subclass specific initialization
 503   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
 504 
 505   // GC support
 506   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {}
 507 
 508   // Redefinition support
 509   virtual void clean_weak_method_links() {}
 510   DEBUG_ONLY(virtual void verify_clean_weak_method_links() {})
 511 
  // CI translation: ProfileData can represent both MethodData data and
  // ciMethodData data. This function is provided for translating
 514   // an oop in a ProfileData to the ci equivalent. Generally speaking,
 515   // most ProfileData don't require any translation, so we provide the null
 516   // translation here, and the required translators are in the ci subclasses.
 517   virtual void translate_from(const ProfileData* data) {}
 518 
 519   virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
 520     ShouldNotReachHere();
 521   }
 522 
 523   void print_data_on(outputStream* st, const MethodData* md) const;
 524 
 525   void print_shared(outputStream* st, const char* name, const char* extra) const;
 526   void tab(outputStream* st, bool first = false) const;
 527 };
 528 
 529 // BitData
 530 //
 531 // A BitData holds a flag or two in its header.
 532 class BitData : public ProfileData {
 533   friend class VMStructs;
 534   friend class JVMCIVMStructs;
 535 protected:
 536   enum {
 537     // null_seen:
 538     //  saw a null operand (cast/aastore/instanceof)
 539       null_seen_flag              = DataLayout::first_flag + 0
 540 #if INCLUDE_JVMCI
 541     // bytecode threw any exception
 542     , exception_seen_flag         = null_seen_flag + 1
 543 #endif
 544   };
 545   enum { bit_cell_count = 0 };  // no additional data fields needed.
 546 public:
 547   BitData(DataLayout* layout) : ProfileData(layout) {
 548   }
 549 
 550   virtual bool is_BitData() const { return true; }
 551 
 552   static int static_cell_count() {
 553     return bit_cell_count;
 554   }
 555 
 556   virtual int cell_count() const {
 557     return static_cell_count();
 558   }
 559 
 560   // Accessor
 561 
 562   // The null_seen flag bit is specially known to the interpreter.
 563   // Consulting it allows the compiler to avoid setting up null_check traps.
 564   bool null_seen()     { return flag_at(null_seen_flag); }
 565   void set_null_seen()    { set_flag_at(null_seen_flag); }
 566 
 567 #if INCLUDE_JVMCI
 568   // true if an exception was thrown at the specific BCI
 569   bool exception_seen() { return flag_at(exception_seen_flag); }
 570   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 571 #endif
 572 
 573   // Code generation support
 574   static int null_seen_byte_constant() {
 575     return flag_number_to_byte_constant(null_seen_flag);
 576   }
 577 
 578   static ByteSize bit_data_size() {
 579     return cell_offset(bit_cell_count);
 580   }
 581 
 582 #ifdef CC_INTERP
 583   static int bit_data_size_in_bytes() {
 584     return cell_offset_in_bytes(bit_cell_count);
 585   }
 586 
 587   static void set_null_seen(DataLayout* layout) {
 588     set_flag_at(layout, null_seen_flag);
 589   }
 590 
 591   static DataLayout* advance(DataLayout* layout) {
 592     return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
 593   }
 594 #endif // CC_INTERP
 595 
 596   void print_data_on(outputStream* st, const char* extra = NULL) const;
 597 };
 598 
 599 // CounterData
 600 //
 601 // A CounterData corresponds to a simple counter.
 602 class CounterData : public BitData {
 603   friend class VMStructs;
 604   friend class JVMCIVMStructs;
 605 protected:
 606   enum {
 607     count_off,
 608     counter_cell_count
 609   };
 610 public:
 611   CounterData(DataLayout* layout) : BitData(layout) {}
 612 
 613   virtual bool is_CounterData() const { return true; }
 614 
 615   static int static_cell_count() {
 616     return counter_cell_count;
 617   }
 618 
 619   virtual int cell_count() const {
 620     return static_cell_count();
 621   }
 622 
 623   // Direct accessor
 624   uint count() const {
 625     return uint_at(count_off);
 626   }
 627 
 628   // Code generation support
 629   static ByteSize count_offset() {
 630     return cell_offset(count_off);
 631   }
 632   static ByteSize counter_data_size() {
 633     return cell_offset(counter_cell_count);
 634   }
 635 
 636   void set_count(uint count) {
 637     set_uint_at(count_off, count);
 638   }
 639 
 640 #ifdef CC_INTERP
 641   static int counter_data_size_in_bytes() {
 642     return cell_offset_in_bytes(counter_cell_count);
 643   }
 644 
 645   static void increment_count_no_overflow(DataLayout* layout) {
 646     increment_uint_at_no_overflow(layout, count_off);
 647   }
 648 
  // Support decrementing the counter when a checkcast / subtype check fails.
 650   static void decrement_count(DataLayout* layout) {
 651     increment_uint_at_no_overflow(layout, count_off, -1);
 652   }
 653 
 654   static DataLayout* advance(DataLayout* layout) {
 655     return (DataLayout*) (((address)layout) + (ssize_t)CounterData::counter_data_size_in_bytes());
 656   }
 657 #endif // CC_INTERP
 658 
 659   void print_data_on(outputStream* st, const char* extra = NULL) const;
 660 };
 661 
 662 // JumpData
 663 //
 664 // A JumpData is used to access profiling information for a direct
 665 // branch.  It is a counter, used for counting the number of branches,
 666 // plus a data displacement, used for realigning the data pointer to
 667 // the corresponding target bci.
 668 class JumpData : public ProfileData {
 669   friend class VMStructs;
 670   friend class JVMCIVMStructs;
 671 protected:
 672   enum {
 673     taken_off_set,
 674     displacement_off_set,
 675     jump_cell_count
 676   };
 677 
 678   void set_displacement(int displacement) {
 679     set_int_at(displacement_off_set, displacement);
 680   }
 681 
 682 public:
 683   JumpData(DataLayout* layout) : ProfileData(layout) {
 684     assert(layout->tag() == DataLayout::jump_data_tag ||
 685       layout->tag() == DataLayout::branch_data_tag, "wrong type");
 686   }
 687 
 688   virtual bool is_JumpData() const { return true; }
 689 
 690   static int static_cell_count() {
 691     return jump_cell_count;
 692   }
 693 
 694   virtual int cell_count() const {
 695     return static_cell_count();
 696   }
 697 
 698   // Direct accessor
 699   uint taken() const {
 700     return uint_at(taken_off_set);
 701   }
 702 
 703   void set_taken(uint cnt) {
 704     set_uint_at(taken_off_set, cnt);
 705   }
 706 
 707   // Saturating counter
 708   uint inc_taken() {
 709     uint cnt = taken() + 1;
    // Did we wrap?  If so, saturate instead of wrapping around to zero.
 711     if (cnt == 0) cnt--;
 712     set_uint_at(taken_off_set, cnt);
 713     return cnt;
 714   }
 715 
 716   int displacement() const {
 717     return int_at(displacement_off_set);
 718   }
 719 
 720   // Code generation support
 721   static ByteSize taken_offset() {
 722     return cell_offset(taken_off_set);
 723   }
 724 
 725   static ByteSize displacement_offset() {
 726     return cell_offset(displacement_off_set);
 727   }
 728 
 729 #ifdef CC_INTERP
 730   static void increment_taken_count_no_overflow(DataLayout* layout) {
 731     increment_uint_at_no_overflow(layout, taken_off_set);
 732   }
 733 
 734   static DataLayout* advance_taken(DataLayout* layout) {
 735     return (DataLayout*) (((address)layout) + (ssize_t)int_at(layout, displacement_off_set));
 736   }
 737 
 738   static uint taken_count(DataLayout* layout) {
 739     return (uint) uint_at(layout, taken_off_set);
 740   }
 741 #endif // CC_INTERP
 742 
 743   // Specific initialization.
 744   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 745 
 746   void print_data_on(outputStream* st, const char* extra = NULL) const;
 747 };
 748 
 749 // Entries in a ProfileData object to record types: it can either be
 750 // none (no profile), unknown (conflicting profile data) or a klass if
 751 // a single one is seen. Whether a null reference was seen is also
 752 // recorded. No counter is associated with the type and a single type
 753 // is tracked (unlike VirtualCallData).
 754 class TypeEntries {
 755 
 756 public:
 757 
 758   // A single cell is used to record information for a type:
 759   // - the cell is initialized to 0
 760   // - when a type is discovered it is stored in the cell
 761   // - bit zero of the cell is used to record whether a null reference
 762   // was encountered or not
 763   // - bit 1 is set to record a conflict in the type information
 764 
 765   enum {
 766     null_seen = 1,
 767     type_mask = ~null_seen,
 768     type_unknown = 2,
 769     status_bits = null_seen | type_unknown,
 770     type_klass_mask = ~status_bits
 771   };
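
  // For illustration: since Klass* values are at least 4-byte aligned, the two
  // low bits of a cell are free for the status bits above.  A cell equal to
  // ((intptr_t)k | null_seen) records klass k plus a seen null reference, a
  // cell equal to type_unknown records conflicting types, and type_none() (0)
  // means nothing has been recorded yet.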
 772 
 773   // what to initialize a cell to
 774   static intptr_t type_none() {
 775     return 0;
 776   }
 777 
 778   // null seen = bit 0 set?
 779   static bool was_null_seen(intptr_t v) {
 780     return (v & null_seen) != 0;
 781   }
 782 
 783   // conflicting type information = bit 1 set?
 784   static bool is_type_unknown(intptr_t v) {
 785     return (v & type_unknown) != 0;
 786   }
 787 
  // no type information yet = all bits cleared, ignoring bit 0?
 789   static bool is_type_none(intptr_t v) {
 790     return (v & type_mask) == 0;
 791   }
 792 
  // recorded type: the cell value with bits 0 and 1 cleared
 794   static intptr_t klass_part(intptr_t v) {
 795     intptr_t r = v & type_klass_mask;
 796     return r;
 797   }
 798 
 799   // type recorded
 800   static Klass* valid_klass(intptr_t k) {
 801     if (!is_type_none(k) &&
 802         !is_type_unknown(k)) {
 803       Klass* res = (Klass*)klass_part(k);
 804       assert(res != NULL, "invalid");
 805       return res;
 806     } else {
 807       return NULL;
 808     }
 809   }
 810 
 811   static intptr_t with_status(intptr_t k, intptr_t in) {
 812     return k | (in & status_bits);
 813   }
 814 
 815   static intptr_t with_status(Klass* k, intptr_t in) {
 816     return with_status((intptr_t)k, in);
 817   }
 818 
 819   static void print_klass(outputStream* st, intptr_t k);
 820 
 821   // GC support
 822   static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
 823 
 824 protected:
 825   // ProfileData object these entries are part of
 826   ProfileData* _pd;
 827   // offset within the ProfileData object where the entries start
 828   const int _base_off;
 829 
 830   TypeEntries(int base_off)
 831     : _base_off(base_off), _pd(NULL) {}
 832 
 833   void set_intptr_at(int index, intptr_t value) {
 834     _pd->set_intptr_at(index, value);
 835   }
 836 
 837   intptr_t intptr_at(int index) const {
 838     return _pd->intptr_at(index);
 839   }
 840 
 841 public:
 842   void set_profile_data(ProfileData* pd) {
 843     _pd = pd;
 844   }
 845 };
 846 
 847 // Type entries used for arguments passed at a call and parameters on
 848 // method entry. 2 cells per entry: one for the type encoded as in
 849 // TypeEntries and one initialized with the stack slot where the
 850 // profiled object is to be found so that the interpreter can locate
 851 // it quickly.
 852 class TypeStackSlotEntries : public TypeEntries {
 853 
 854 private:
 855   enum {
 856     stack_slot_entry,
 857     type_entry,
 858     per_arg_cell_count
 859   };
 860 
 861   // offset of cell for stack slot for entry i within ProfileData object
 862   int stack_slot_offset(int i) const {
 863     return _base_off + stack_slot_local_offset(i);
 864   }
 865 
 866   const int _number_of_entries;
 867 
 868   // offset of cell for type for entry i within ProfileData object
 869   int type_offset_in_cells(int i) const {
 870     return _base_off + type_local_offset(i);
 871   }
 872 
 873 public:
 874 
 875   TypeStackSlotEntries(int base_off, int nb_entries)
 876     : TypeEntries(base_off), _number_of_entries(nb_entries) {}
 877 
 878   static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
 879 
 880   void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
 881 
 882   int number_of_entries() const { return _number_of_entries; }
 883 
 884   // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
 885   static int stack_slot_local_offset(int i) {
 886     return i * per_arg_cell_count + stack_slot_entry;
 887   }
 888 
 889   // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
 890   static int type_local_offset(int i) {
 891     return i * per_arg_cell_count + type_entry;
 892   }
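
  // For illustration: with per_arg_cell_count == 2, entry 0 uses local cells 0
  // (stack slot) and 1 (type), entry 1 uses cells 2 and 3, and so on; the
  // global cell index adds _base_off to these local offsets.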
 893 
 894   // stack slot for entry i
 895   uint stack_slot(int i) const {
 896     assert(i >= 0 && i < _number_of_entries, "oob");
 897     return _pd->uint_at(stack_slot_offset(i));
 898   }
 899 
 900   // set stack slot for entry i
 901   void set_stack_slot(int i, uint num) {
 902     assert(i >= 0 && i < _number_of_entries, "oob");
 903     _pd->set_uint_at(stack_slot_offset(i), num);
 904   }
 905 
 906   // type for entry i
 907   intptr_t type(int i) const {
 908     assert(i >= 0 && i < _number_of_entries, "oob");
 909     return _pd->intptr_at(type_offset_in_cells(i));
 910   }
 911 
 912   // set type for entry i
 913   void set_type(int i, intptr_t k) {
 914     assert(i >= 0 && i < _number_of_entries, "oob");
 915     _pd->set_intptr_at(type_offset_in_cells(i), k);
 916   }
 917 
 918   static ByteSize per_arg_size() {
 919     return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
 920   }
 921 
 922   static int per_arg_count() {
 923     return per_arg_cell_count;
 924   }
 925 
 926   ByteSize type_offset(int i) const {
 927     return DataLayout::cell_offset(type_offset_in_cells(i));
 928   }
 929 
 930   // GC support
 931   void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 932 
 933   void print_data_on(outputStream* st) const;
 934 };
 935 
 936 // Type entry used for return from a call. A single cell to record the
 937 // type.
 938 class ReturnTypeEntry : public TypeEntries {
 939 
 940 private:
 941   enum {
 942     cell_count = 1
 943   };
 944 
 945 public:
 946   ReturnTypeEntry(int base_off)
 947     : TypeEntries(base_off) {}
 948 
 949   void post_initialize() {
 950     set_type(type_none());
 951   }
 952 
 953   intptr_t type() const {
 954     return _pd->intptr_at(_base_off);
 955   }
 956 
 957   void set_type(intptr_t k) {
 958     _pd->set_intptr_at(_base_off, k);
 959   }
 960 
 961   static int static_cell_count() {
 962     return cell_count;
 963   }
 964 
 965   static ByteSize size() {
 966     return in_ByteSize(cell_count * DataLayout::cell_size);
 967   }
 968 
 969   ByteSize type_offset() {
 970     return DataLayout::cell_offset(_base_off);
 971   }
 972 
 973   // GC support
 974   void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 975 
 976   void print_data_on(outputStream* st) const;
 977 };
 978 
 979 // Entries to collect type information at a call: contains arguments
 980 // (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
 981 // number of cells. Because the number of cells for the return type is
// smaller than the number of cells for the type of an argument, the
 983 // number of cells is used to tell how many arguments are profiled and
 984 // whether a return value is profiled. See has_arguments() and
 985 // has_return().
 986 class TypeEntriesAtCall {
 987 private:
 988   static int stack_slot_local_offset(int i) {
 989     return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
 990   }
 991 
 992   static int argument_type_local_offset(int i) {
 993     return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
 994   }
 995 
 996 public:
 997 
 998   static int header_cell_count() {
 999     return 1;
1000   }
1001 
1002   static int cell_count_local_offset() {
1003     return 0;
1004   }
1005 
1006   static int compute_cell_count(BytecodeStream* stream);
1007 
1008   static void initialize(DataLayout* dl, int base, int cell_count) {
1009     int off = base + cell_count_local_offset();
1010     dl->set_cell_at(off, cell_count - base - header_cell_count());
1011   }
1012 
1013   static bool arguments_profiling_enabled();
1014   static bool return_profiling_enabled();
1015 
1016   // Code generation support
1017   static ByteSize cell_count_offset() {
1018     return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
1019   }
1020 
1021   static ByteSize args_data_offset() {
1022     return in_ByteSize(header_cell_count() * DataLayout::cell_size);
1023   }
1024 
1025   static ByteSize stack_slot_offset(int i) {
1026     return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
1027   }
1028 
1029   static ByteSize argument_type_offset(int i) {
1030     return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
1031   }
1032 
1033   static ByteSize return_only_size() {
1034     return ReturnTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
1035   }
1036 
1037 };
1038 
1039 // CallTypeData
1040 //
// A CallTypeData is used to access profiling information about a non-virtual
// call for which we collect type information about arguments
1043 // and return value.
1044 class CallTypeData : public CounterData {
1045 private:
1046   // entries for arguments if any
1047   TypeStackSlotEntries _args;
1048   // entry for return type if any
1049   ReturnTypeEntry _ret;
1050 
1051   int cell_count_global_offset() const {
1052     return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1053   }
1054 
1055   // number of cells not counting the header
1056   int cell_count_no_header() const {
1057     return uint_at(cell_count_global_offset());
1058   }
1059 
1060   void check_number_of_arguments(int total) {
1061     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1062   }
1063 
1064 public:
1065   CallTypeData(DataLayout* layout) :
1066     CounterData(layout),
1067     _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1068     _ret(cell_count() - ReturnTypeEntry::static_cell_count())
1069   {
1070     assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
1071     // Some compilers (VC++) don't want this passed in member initialization list
1072     _args.set_profile_data(this);
1073     _ret.set_profile_data(this);
1074   }
1075 
1076   const TypeStackSlotEntries* args() const {
1077     assert(has_arguments(), "no profiling of arguments");
1078     return &_args;
1079   }
1080 
1081   const ReturnTypeEntry* ret() const {
1082     assert(has_return(), "no profiling of return value");
1083     return &_ret;
1084   }
1085 
1086   virtual bool is_CallTypeData() const { return true; }
1087 
1088   static int static_cell_count() {
1089     return -1;
1090   }
1091 
1092   static int compute_cell_count(BytecodeStream* stream) {
1093     return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1094   }
1095 
1096   static void initialize(DataLayout* dl, int cell_count) {
1097     TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1098   }
1099 
1100   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1101 
1102   virtual int cell_count() const {
1103     return CounterData::static_cell_count() +
1104       TypeEntriesAtCall::header_cell_count() +
1105       int_at_unchecked(cell_count_global_offset());
1106   }
1107 
1108   int number_of_arguments() const {
1109     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1110   }
1111 
1112   void set_argument_type(int i, Klass* k) {
1113     assert(has_arguments(), "no arguments!");
1114     intptr_t current = _args.type(i);
1115     _args.set_type(i, TypeEntries::with_status(k, current));
1116   }
1117 
1118   void set_return_type(Klass* k) {
1119     assert(has_return(), "no return!");
1120     intptr_t current = _ret.type();
1121     _ret.set_type(TypeEntries::with_status(k, current));
1122   }
1123 
1124   // An entry for a return value takes less space than an entry for an
  // argument, so if the number of cells exceeds the number of cells
1126   // needed for an argument, this object contains type information for
1127   // at least one argument.
1128   bool has_arguments() const {
1129     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1130     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1131     return res;
1132   }
1133 
1134   // An entry for a return value takes less space than an entry for an
1135   // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is non-zero, a return value
1137   // is profiled in this object.
1138   bool has_return() const {
1139     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1140     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1141     return res;
1142   }
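
  // For illustration: if two argument types and the return type are profiled,
  // cell_count_no_header() == 2 * 2 + 1 == 5, so number_of_arguments() == 2
  // and both has_arguments() and has_return() are true; if only the return
  // type is profiled the value is 1, giving no argument entries but
  // has_return() == true.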
1143 
1144   // Code generation support
1145   static ByteSize args_data_offset() {
1146     return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1147   }
1148 
1149   ByteSize argument_type_offset(int i) {
1150     return _args.type_offset(i);
1151   }
1152 
1153   ByteSize return_type_offset() {
1154     return _ret.type_offset();
1155   }
1156 
1157   // GC support
1158   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
1159     if (has_arguments()) {
1160       _args.clean_weak_klass_links(is_alive_closure);
1161     }
1162     if (has_return()) {
1163       _ret.clean_weak_klass_links(is_alive_closure);
1164     }
1165   }
1166 
1167   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1168 };
1169 
1170 // ReceiverTypeData
1171 //
1172 // A ReceiverTypeData is used to access profiling information about a
1173 // dynamic type check.  It consists of a counter which counts the total times
1174 // that the check is reached, and a series of (Klass*, count) pairs
1175 // which are used to store a type profile for the receiver of the check.
1176 class ReceiverTypeData : public CounterData {
1177   friend class VMStructs;
1178   friend class JVMCIVMStructs;
1179 protected:
1180   enum {
1181 #if INCLUDE_JVMCI
1182     // Description of the different counters
1183     // ReceiverTypeData for instanceof/checkcast/aastore:
1184     //   count is decremented for failed type checks
1185     //   JVMCI only: nonprofiled_count is incremented on type overflow
1186     // VirtualCallData for invokevirtual/invokeinterface:
1187     //   count is incremented on type overflow
1188     //   JVMCI only: nonprofiled_count is incremented on method overflow
1189 
1190     // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
1191     nonprofiled_count_off_set = counter_cell_count,
1192     receiver0_offset,
1193 #else
1194     receiver0_offset = counter_cell_count,
1195 #endif
1196     count0_offset,
1197     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1198   };
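
  // For illustration, the non-JVMCI cell layout is
  //   [count] [receiver0, count0] [receiver1, count1] ...
  // with TypeProfileWidth (receiver, count) rows following the shared counter;
  // with INCLUDE_JVMCI a nonprofiled_count cell precedes the rows.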
1199 
1200 public:
1201   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1202     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1203            layout->tag() == DataLayout::virtual_call_data_tag ||
1204            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1205   }
1206 
1207   virtual bool is_ReceiverTypeData() const { return true; }
1208 
1209   static int static_cell_count() {
1210     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
1211   }
1212 
1213   virtual int cell_count() const {
1214     return static_cell_count();
1215   }
1216 
1217   // Direct accessors
1218   static uint row_limit() {
1219     return TypeProfileWidth;
1220   }
1221   static int receiver_cell_index(uint row) {
1222     return receiver0_offset + row * receiver_type_row_cell_count;
1223   }
1224   static int receiver_count_cell_index(uint row) {
1225     return count0_offset + row * receiver_type_row_cell_count;
1226   }
1227 
1228   Klass* receiver(uint row) const {
1229     assert(row < row_limit(), "oob");
1230 
1231     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1232     assert(recv == NULL || recv->is_klass(), "wrong type");
1233     return recv;
1234   }
1235 
1236   void set_receiver(uint row, Klass* k) {
1237     assert((uint)row < row_limit(), "oob");
1238     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1239   }
1240 
1241   uint receiver_count(uint row) const {
1242     assert(row < row_limit(), "oob");
1243     return uint_at(receiver_count_cell_index(row));
1244   }
1245 
1246   void set_receiver_count(uint row, uint count) {
1247     assert(row < row_limit(), "oob");
1248     set_uint_at(receiver_count_cell_index(row), count);
1249   }
1250 
1251   void clear_row(uint row) {
1252     assert(row < row_limit(), "oob");
    // Clear the total count - the indicator of a polymorphic call site.
    // The site may look monomorphic after that, but clearing it gives
    // more accurate profiling information, because an execution phase
    // change has occurred since the klasses were unloaded.
    // If the site is still polymorphic, the MDO will be updated to
    // reflect that.  But the site may have become only bimorphic, in
    // which case keeping a non-zero total count would be wrong.
    // Even if we compile the site as monomorphic when it is not, the
    // worst case is a trap, deoptimization, and recompilation with an
    // updated MDO after the method has run again in the interpreter.
    // An additional receiver will be recorded in the cleaned row
    // during the next execution of the call.
    //
    // Note: our profiling logic works with empty rows in any slot.
    // We sort the profiling info (ciCallProfile) for compilation.
1268     //
1269     set_count(0);
1270     set_receiver(row, NULL);
1271     set_receiver_count(row, 0);
1272 #if INCLUDE_JVMCI
1273     if (!this->is_VirtualCallData()) {
1274       // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
1275       // must also be reset (see "Description of the different counters" above)
1276       set_nonprofiled_count(0);
1277     }
1278 #endif
1279   }
1280 
1281   // Code generation support
1282   static ByteSize receiver_offset(uint row) {
1283     return cell_offset(receiver_cell_index(row));
1284   }
1285   static ByteSize receiver_count_offset(uint row) {
1286     return cell_offset(receiver_count_cell_index(row));
1287   }
1288 #if INCLUDE_JVMCI
1289   static ByteSize nonprofiled_receiver_count_offset() {
1290     return cell_offset(nonprofiled_count_off_set);
1291   }
1292   uint nonprofiled_count() const {
1293     return uint_at(nonprofiled_count_off_set);
1294   }
1295   void set_nonprofiled_count(uint count) {
1296     set_uint_at(nonprofiled_count_off_set, count);
1297   }
1298 #endif // INCLUDE_JVMCI
1299   static ByteSize receiver_type_data_size() {
1300     return cell_offset(static_cell_count());
1301   }
1302 
1303   // GC support
1304   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1305 
1306 #ifdef CC_INTERP
1307   static int receiver_type_data_size_in_bytes() {
1308     return cell_offset_in_bytes(static_cell_count());
1309   }
1310 
1311   static Klass *receiver_unchecked(DataLayout* layout, uint row) {
1312     Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row));
1313     return recv;
1314   }
1315 
1316   static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
1317     const int num_rows = row_limit();
1318     // Receiver already exists?
1319     for (int row = 0; row < num_rows; row++) {
1320       if (receiver_unchecked(layout, row) == rcvr) {
1321         increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
1322         return;
1323       }
1324     }
1325     // New receiver, find a free slot.
1326     for (int row = 0; row < num_rows; row++) {
1327       if (receiver_unchecked(layout, row) == NULL) {
1328         set_intptr_at(layout, receiver_cell_index(row), (intptr_t)rcvr);
1329         increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
1330         return;
1331       }
1332     }
1333     // Receiver did not match any saved receiver and there is no empty row for it.
1334     // Increment total counter to indicate polymorphic case.
1335     increment_count_no_overflow(layout);
1336   }
1337 
1338   static DataLayout* advance(DataLayout* layout) {
1339     return (DataLayout*) (((address)layout) + (ssize_t)ReceiverTypeData::receiver_type_data_size_in_bytes());
1340   }
1341 #endif // CC_INTERP
1342 
1343   void print_receiver_data_on(outputStream* st) const;
1344   void print_data_on(outputStream* st, const char* extra = NULL) const;
1345 };
1346 
1347 // VirtualCallData
1348 //
1349 // A VirtualCallData is used to access profiling information about a
1350 // virtual call.  For now, it has nothing more than a ReceiverTypeData.
1351 class VirtualCallData : public ReceiverTypeData {
1352 public:
1353   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1354     assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1355            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1356   }
1357 
1358   virtual bool is_VirtualCallData() const { return true; }
1359 
1360   static int static_cell_count() {
1361     // At this point we could add more profile state, e.g., for arguments.
1362     // But for now it's the same size as the base record type.
1363     return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count);
1364   }
1365 
1366   virtual int cell_count() const {
1367     return static_cell_count();
1368   }
1369 
1370   // Direct accessors
1371   static ByteSize virtual_call_data_size() {
1372     return cell_offset(static_cell_count());
1373   }
1374 
1375 #ifdef CC_INTERP
1376   static int virtual_call_data_size_in_bytes() {
1377     return cell_offset_in_bytes(static_cell_count());
1378   }
1379 
1380   static DataLayout* advance(DataLayout* layout) {
1381     return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
1382   }
1383 #endif // CC_INTERP
1384 
1385 #if INCLUDE_JVMCI
1386   static ByteSize method_offset(uint row) {
1387     return cell_offset(method_cell_index(row));
1388   }
1389   static ByteSize method_count_offset(uint row) {
1390     return cell_offset(method_count_cell_index(row));
1391   }
1392   static int method_cell_index(uint row) {
1393     return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1394   }
1395   static int method_count_cell_index(uint row) {
1396     return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1397   }
1398   static uint method_row_limit() {
1399     return MethodProfileWidth;
1400   }
1401 
1402   Method* method(uint row) const {
1403     assert(row < method_row_limit(), "oob");
1404 
1405     Method* method = (Method*)intptr_at(method_cell_index(row));
1406     assert(method == NULL || method->is_method(), "must be");
1407     return method;
1408   }
1409 
1410   uint method_count(uint row) const {
1411     assert(row < method_row_limit(), "oob");
1412     return uint_at(method_count_cell_index(row));
1413   }
1414 
1415   void set_method(uint row, Method* m) {
1416     assert((uint)row < method_row_limit(), "oob");
1417     set_intptr_at(method_cell_index(row), (uintptr_t)m);
1418   }
1419 
1420   void set_method_count(uint row, uint count) {
1421     assert(row < method_row_limit(), "oob");
1422     set_uint_at(method_count_cell_index(row), count);
1423   }
1424 
1425   void clear_method_row(uint row) {
1426     assert(row < method_row_limit(), "oob");
1427     // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData).
1428     set_nonprofiled_count(0);
1429     set_method(row, NULL);
1430     set_method_count(row, 0);
1431   }
1432 
1433   // GC support
1434   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1435 
1436   // Redefinition support
1437   virtual void clean_weak_method_links();
1438 #endif // INCLUDE_JVMCI
1439 
1440   void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1441   void print_data_on(outputStream* st, const char* extra = NULL) const;
1442 };
1443 
1444 // VirtualCallTypeData
1445 //
1446 // A VirtualCallTypeData is used to access profiling information about
1447 // a virtual call for which we collect type information about
1448 // arguments and return value.
1449 class VirtualCallTypeData : public VirtualCallData {
1450 private:
1451   // entries for arguments if any
1452   TypeStackSlotEntries _args;
1453   // entry for return type if any
1454   ReturnTypeEntry _ret;
1455 
1456   int cell_count_global_offset() const {
1457     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1458   }
1459 
1460   // number of cells not counting the header
1461   int cell_count_no_header() const {
1462     return uint_at(cell_count_global_offset());
1463   }
1464 
1465   void check_number_of_arguments(int total) {
1466     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1467   }
1468 
1469 public:
1470   VirtualCallTypeData(DataLayout* layout) :
1471     VirtualCallData(layout),
1472     _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1473     _ret(cell_count() - ReturnTypeEntry::static_cell_count())
1474   {
1475     assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1476     // Some compilers (VC++) don't want this passed in member initialization list
1477     _args.set_profile_data(this);
1478     _ret.set_profile_data(this);
1479   }
1480 
1481   const TypeStackSlotEntries* args() const {
1482     assert(has_arguments(), "no profiling of arguments");
1483     return &_args;
1484   }
1485 
1486   const ReturnTypeEntry* ret() const {
1487     assert(has_return(), "no profiling of return value");
1488     return &_ret;
1489   }
1490 
1491   virtual bool is_VirtualCallTypeData() const { return true; }
1492 
1493   static int static_cell_count() {
1494     return -1;
1495   }
1496 
1497   static int compute_cell_count(BytecodeStream* stream) {
1498     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1499   }
1500 
1501   static void initialize(DataLayout* dl, int cell_count) {
1502     TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1503   }
1504 
1505   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1506 
1507   virtual int cell_count() const {
1508     return VirtualCallData::static_cell_count() +
1509       TypeEntriesAtCall::header_cell_count() +
1510       int_at_unchecked(cell_count_global_offset());
1511   }
1512 
1513   int number_of_arguments() const {
1514     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1515   }
1516 
1517   void set_argument_type(int i, Klass* k) {
1518     assert(has_arguments(), "no arguments!");
1519     intptr_t current = _args.type(i);
1520     _args.set_type(i, TypeEntries::with_status(k, current));
1521   }
1522 
1523   void set_return_type(Klass* k) {
1524     assert(has_return(), "no return!");
1525     intptr_t current = _ret.type();
1526     _ret.set_type(TypeEntries::with_status(k, current));
1527   }
1528 
1529   // An entry for a return value takes less space than an entry for an
1530   // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is non-zero, a return value
1532   // is profiled in this object.
1533   bool has_return() const {
1534     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1535     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1536     return res;
1537   }
1538 
1539   // An entry for a return value takes less space than an entry for an
  // argument, so if the number of cells exceeds the number of cells
1541   // needed for an argument, this object contains type information for
1542   // at least one argument.
1543   bool has_arguments() const {
1544     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1545     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1546     return res;
1547   }
1548 
1549   // Code generation support
1550   static ByteSize args_data_offset() {
1551     return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1552   }
1553 
1554   ByteSize argument_type_offset(int i) {
1555     return _args.type_offset(i);
1556   }
1557 
1558   ByteSize return_type_offset() {
1559     return _ret.type_offset();
1560   }
1561 
1562   // GC support
1563   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
1564     ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
1565     if (has_arguments()) {
1566       _args.clean_weak_klass_links(is_alive_closure);
1567     }
1568     if (has_return()) {
1569       _ret.clean_weak_klass_links(is_alive_closure);
1570     }
1571   }
1572 
1573   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1574 };
1575 
1576 // RetData
1577 //
1578 // A RetData is used to access profiling information for a ret bytecode.
1579 // It is composed of a count of the number of times that the ret has
1580 // been executed, followed by a series of triples of the form
1581 // (bci, count, displacement) which count the number of times that some bci
1582 // was the target of the ret and cache a corresponding data displacement.
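//
// An illustrative sketch of the cell layout implied by the enum below
// (assuming CounterData contributes a single count cell; BciProfileWidth
// gives the number of rows reserved):
//
//   cell 0       : execution count of the ret (from CounterData)
//   cells 1..3   : row 0 -> (bci, count, displacement)
//   cells 4..6   : row 1 -> (bci, count, displacement)
//   ...            one triple per row, up to BciProfileWidth rows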
1583 class RetData : public CounterData {
1584 protected:
1585   enum {
1586     bci0_offset = counter_cell_count,
1587     count0_offset,
1588     displacement0_offset,
1589     ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1590   };
1591 
1592   void set_bci(uint row, int bci) {
1593     assert((uint)row < row_limit(), "oob");
1594     set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1595   }
1596   void release_set_bci(uint row, int bci);
1597   void set_bci_count(uint row, uint count) {
1598     assert((uint)row < row_limit(), "oob");
1599     set_uint_at(count0_offset + row * ret_row_cell_count, count);
1600   }
1601   void set_bci_displacement(uint row, int disp) {
1602     set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1603   }
1604 
1605 public:
1606   RetData(DataLayout* layout) : CounterData(layout) {
1607     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1608   }
1609 
1610   virtual bool is_RetData() const { return true; }
1611 
1612   enum {
1613     no_bci = -1 // value stored in a bci cell when that row is not in use.
1614   };
1615 
1616   static int static_cell_count() {
1617     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1618   }
1619 
1620   virtual int cell_count() const {
1621     return static_cell_count();
1622   }
1623 
1624   static uint row_limit() {
1625     return BciProfileWidth;
1626   }
1627   static int bci_cell_index(uint row) {
1628     return bci0_offset + row * ret_row_cell_count;
1629   }
1630   static int bci_count_cell_index(uint row) {
1631     return count0_offset + row * ret_row_cell_count;
1632   }
1633   static int bci_displacement_cell_index(uint row) {
1634     return displacement0_offset + row * ret_row_cell_count;
1635   }
1636 
1637   // Direct accessors
1638   int bci(uint row) const {
1639     return int_at(bci_cell_index(row));
1640   }
1641   uint bci_count(uint row) const {
1642     return uint_at(bci_count_cell_index(row));
1643   }
1644   int bci_displacement(uint row) const {
1645     return int_at(bci_displacement_cell_index(row));
1646   }
1647 
1648   // Interpreter Runtime support
1649   address fixup_ret(int return_bci, MethodData* mdo);
1650 
1651   // Code generation support
1652   static ByteSize bci_offset(uint row) {
1653     return cell_offset(bci_cell_index(row));
1654   }
1655   static ByteSize bci_count_offset(uint row) {
1656     return cell_offset(bci_count_cell_index(row));
1657   }
1658   static ByteSize bci_displacement_offset(uint row) {
1659     return cell_offset(bci_displacement_cell_index(row));
1660   }
1661 
1662 #ifdef CC_INTERP
1663   static DataLayout* advance(MethodData *md, int bci);
1664 #endif // CC_INTERP
1665 
1666   // Specific initialization.
1667   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1668 
1669   void print_data_on(outputStream* st, const char* extra = NULL) const;
1670 };
1671 
1672 // BranchData
1673 //
1674 // A BranchData is used to access profiling data for a two-way branch.
1675 // It consists of taken and not_taken counts as well as a data displacement
1676 // for the taken case.
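//
// As an illustration (not part of this class), a compiler heuristic could
// derive an estimated branch probability from the two counts, e.g.:
//
//   uint t  = data->taken();      // inherited from JumpData
//   uint nt = data->not_taken();
//   double prob_taken = (t + nt == 0) ? 0.5 : (double)t / (double)(t + nt);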
1677 class BranchData : public JumpData {
1678   friend class VMStructs;
1679   friend class JVMCIVMStructs;
1680 protected:
1681   enum {
1682     not_taken_off_set = jump_cell_count,
1683     branch_cell_count
1684   };
1685 
1686   void set_displacement(int displacement) {
1687     set_int_at(displacement_off_set, displacement);
1688   }
1689 
1690 public:
1691   BranchData(DataLayout* layout) : JumpData(layout) {
1692     assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
1693   }
1694 
1695   virtual bool is_BranchData() const { return true; }
1696 
1697   static int static_cell_count() {
1698     return branch_cell_count;
1699   }
1700 
1701   virtual int cell_count() const {
1702     return static_cell_count();
1703   }
1704 
1705   // Direct accessor
1706   uint not_taken() const {
1707     return uint_at(not_taken_off_set);
1708   }
1709 
1710   void set_not_taken(uint cnt) {
1711     set_uint_at(not_taken_off_set, cnt);
1712   }
1713 
1714   uint inc_not_taken() {
1715     uint cnt = not_taken() + 1;
1716     // Saturate: if the increment wrapped around to zero, pin at the maximum value.
1717     if (cnt == 0) cnt--;
1718     set_uint_at(not_taken_off_set, cnt);
1719     return cnt;
1720   }
1721 
1722   // Code generation support
1723   static ByteSize not_taken_offset() {
1724     return cell_offset(not_taken_off_set);
1725   }
1726   static ByteSize branch_data_size() {
1727     return cell_offset(branch_cell_count);
1728   }
1729 
1730 #ifdef CC_INTERP
1731   static int branch_data_size_in_bytes() {
1732     return cell_offset_in_bytes(branch_cell_count);
1733   }
1734 
1735   static void increment_not_taken_count_no_overflow(DataLayout* layout) {
1736     increment_uint_at_no_overflow(layout, not_taken_off_set);
1737   }
1738 
1739   static DataLayout* advance_not_taken(DataLayout* layout) {
1740     return (DataLayout*) (((address)layout) + (ssize_t)BranchData::branch_data_size_in_bytes());
1741   }
1742 #endif // CC_INTERP
1743 
1744   // Specific initialization.
1745   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1746 
1747   void print_data_on(outputStream* st, const char* extra = NULL) const;
1748 };
1749 
1750 // ArrayData
1751 //
1752 // An ArrayData is a base class for accessing profiling data which does
1753 // not have a statically known size.  It consists of an array length
1754 // and an array start.
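//
// The implied cell layout (an illustrative sketch; see cell_count() below):
// cell 0 holds the element count and the elements follow, so the total cell
// count is the array length plus one.
//
//   cell 0             : array_len
//   cells 1..array_len : elements, addressed via array_*_at(index)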
1755 class ArrayData : public ProfileData {
1756   friend class VMStructs;
1757   friend class JVMCIVMStructs;
1758 protected:
1759   friend class DataLayout;
1760 
1761   enum {
1762     array_len_off_set,
1763     array_start_off_set
1764   };
1765 
1766   uint array_uint_at(int index) const {
1767     int aindex = index + array_start_off_set;
1768     return uint_at(aindex);
1769   }
1770   int array_int_at(int index) const {
1771     int aindex = index + array_start_off_set;
1772     return int_at(aindex);
1773   }
1774   oop array_oop_at(int index) const {
1775     int aindex = index + array_start_off_set;
1776     return oop_at(aindex);
1777   }
1778   void array_set_int_at(int index, int value) {
1779     int aindex = index + array_start_off_set;
1780     set_int_at(aindex, value);
1781   }
1782 
1783 #ifdef CC_INTERP
1784   // Static low level accessors for DataLayout with ArrayData's semantics.
1785 
1786   static void increment_array_uint_at_no_overflow(DataLayout* layout, int index) {
1787     int aindex = index + array_start_off_set;
1788     increment_uint_at_no_overflow(layout, aindex);
1789   }
1790 
1791   static int array_int_at(DataLayout* layout, int index) {
1792     int aindex = index + array_start_off_set;
1793     return int_at(layout, aindex);
1794   }
1795 #endif // CC_INTERP
1796 
1797   // Code generation support for subclasses.
1798   static ByteSize array_element_offset(int index) {
1799     return cell_offset(array_start_off_set + index);
1800   }
1801 
1802 public:
1803   ArrayData(DataLayout* layout) : ProfileData(layout) {}
1804 
1805   virtual bool is_ArrayData() const { return true; }
1806 
1807   static int static_cell_count() {
1808     return -1;
1809   }
1810 
1811   int array_len() const {
1812     return int_at_unchecked(array_len_off_set);
1813   }
1814 
1815   virtual int cell_count() const {
1816     return array_len() + 1;
1817   }
1818 
1819   // Code generation support
1820   static ByteSize array_len_offset() {
1821     return cell_offset(array_len_off_set);
1822   }
1823   static ByteSize array_start_offset() {
1824     return cell_offset(array_start_off_set);
1825   }
1826 };
1827 
1828 // MultiBranchData
1829 //
1830 // A MultiBranchData is used to access profiling information for a multi-way
1831 // branch (*switch bytecodes).  It consists of a default count and displacement
1832 // followed by a series of (count, displacement) pairs, which count the number
1833 // of times each case was taken and give the data displacement for each target.
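//
// Worked example (illustrative only): for a switch with 3 explicit cases the
// underlying ArrayData holds 2 cells for the default case plus 2 cells per
// case, so with per_case_cell_count == 2:
//
//   array_len()       == 2 + 3 * per_case_cell_count                // == 8
//   number_of_cases() == (array_len() - 2) / per_case_cell_count    // == 3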
1834 class MultiBranchData : public ArrayData {
1835   friend class VMStructs;
1836   friend class JVMCIVMStructs;
1837 protected:
1838   enum {
1839     default_count_off_set,
1840     default_disaplacement_off_set,
1841     case_array_start
1842   };
1843   enum {
1844     relative_count_off_set,
1845     relative_displacement_off_set,
1846     per_case_cell_count
1847   };
1848 
1849   void set_default_displacement(int displacement) {
1850     array_set_int_at(default_disaplacement_off_set, displacement);
1851   }
1852   void set_displacement_at(int index, int displacement) {
1853     array_set_int_at(case_array_start +
1854                      index * per_case_cell_count +
1855                      relative_displacement_off_set,
1856                      displacement);
1857   }
1858 
1859 public:
1860   MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1861     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1862   }
1863 
1864   virtual bool is_MultiBranchData() const { return true; }
1865 
1866   static int compute_cell_count(BytecodeStream* stream);
1867 
1868   int number_of_cases() const {
1869     int alen = array_len() - 2; // subtract the two cells used by the default case
1870     assert(alen % per_case_cell_count == 0, "must be even");
1871     return (alen / per_case_cell_count);
1872   }
1873 
1874   uint default_count() const {
1875     return array_uint_at(default_count_off_set);
1876   }
1877   int default_displacement() const {
1878     return array_int_at(default_disaplacement_off_set);
1879   }
1880 
1881   uint count_at(int index) const {
1882     return array_uint_at(case_array_start +
1883                          index * per_case_cell_count +
1884                          relative_count_off_set);
1885   }
1886   int displacement_at(int index) const {
1887     return array_int_at(case_array_start +
1888                         index * per_case_cell_count +
1889                         relative_displacement_off_set);
1890   }
1891 
1892   // Code generation support
1893   static ByteSize default_count_offset() {
1894     return array_element_offset(default_count_off_set);
1895   }
1896   static ByteSize default_displacement_offset() {
1897     return array_element_offset(default_disaplacement_off_set);
1898   }
1899   static ByteSize case_count_offset(int index) {
1900     return case_array_offset() +
1901            (per_case_size() * index) +
1902            relative_count_offset();
1903   }
1904   static ByteSize case_array_offset() {
1905     return array_element_offset(case_array_start);
1906   }
1907   static ByteSize per_case_size() {
1908     return in_ByteSize(per_case_cell_count) * cell_size;
1909   }
1910   static ByteSize relative_count_offset() {
1911     return in_ByteSize(relative_count_off_set) * cell_size;
1912   }
1913   static ByteSize relative_displacement_offset() {
1914     return in_ByteSize(relative_displacement_off_set) * cell_size;
1915   }
1916 
1917 #ifdef CC_INTERP
1918   static void increment_count_no_overflow(DataLayout* layout, int index) {
1919     if (index == -1) {
1920       increment_array_uint_at_no_overflow(layout, default_count_off_set);
1921     } else {
1922       increment_array_uint_at_no_overflow(layout, case_array_start +
1923                                                   index * per_case_cell_count +
1924                                                   relative_count_off_set);
1925     }
1926   }
1927 
1928   static DataLayout* advance(DataLayout* layout, int index) {
1929     if (index == -1) {
1930       return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, default_disaplacement_off_set));
1931     } else {
1932       return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, case_array_start +
1933                                                                               index * per_case_cell_count +
1934                                                                               relative_displacement_off_set));
1935     }
1936   }
1937 #endif // CC_INTERP
1938 
1939   // Specific initialization.
1940   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1941 
1942   void print_data_on(outputStream* st, const char* extra = NULL) const;
1943 };
1944 
1945 class ArgInfoData : public ArrayData {
1946 
1947 public:
1948   ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1949     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1950   }
1951 
1952   virtual bool is_ArgInfoData() const { return true; }
1953 
1954 
1955   int number_of_args() const {
1956     return array_len();
1957   }
1958 
1959   uint arg_modified(int arg) const {
1960     return array_uint_at(arg);
1961   }
1962 
1963   void set_arg_modified(int arg, uint val) {
1964     array_set_int_at(arg, val);
1965   }
1966 
1967   void print_data_on(outputStream* st, const char* extra = NULL) const;
1968 };
1969 
1970 // ParametersTypeData
1971 //
1972 // A ParametersTypeData is used to access profiling information about
1973 // types of parameters to a method
1974 class ParametersTypeData : public ArrayData {
1975 
1976 private:
1977   TypeStackSlotEntries _parameters;
1978 
1979   static int stack_slot_local_offset(int i) {
1980     assert_profiling_enabled();
1981     return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1982   }
1983 
1984   static int type_local_offset(int i) {
1985     assert_profiling_enabled();
1986     return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1987   }
1988 
1989   static bool profiling_enabled();
1990   static void assert_profiling_enabled() {
1991     assert(profiling_enabled(), "method parameters profiling should be on");
1992   }
1993 
1994 public:
1995   ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
1996     assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
1997     // Some compilers (VC++) don't want this passed in member initialization list
1998     _parameters.set_profile_data(this);
1999   }
2000 
2001   static int compute_cell_count(Method* m);
2002 
2003   virtual bool is_ParametersTypeData() const { return true; }
2004 
2005   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
2006 
2007   int number_of_parameters() const {
2008     return array_len() / TypeStackSlotEntries::per_arg_count();
2009   }
2010 
2011   const TypeStackSlotEntries* parameters() const { return &_parameters; }
2012 
2013   uint stack_slot(int i) const {
2014     return _parameters.stack_slot(i);
2015   }
2016 
2017   void set_type(int i, Klass* k) {
2018     intptr_t current = _parameters.type(i);
2019     _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
2020   }
2021 
2022   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
2023     _parameters.clean_weak_klass_links(is_alive_closure);
2024   }
2025 
2026   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
2027 
2028   static ByteSize stack_slot_offset(int i) {
2029     return cell_offset(stack_slot_local_offset(i));
2030   }
2031 
2032   static ByteSize type_offset(int i) {
2033     return cell_offset(type_local_offset(i));
2034   }
2035 };
2036 
2037 // SpeculativeTrapData
2038 //
2039 // A SpeculativeTrapData is used to record traps due to type
2040 // speculation. It records the root method of the compilation: the fact
2041 // that type speculation is wrong in the context of one compilation (for
2042 // method1) doesn't mean it's wrong in the context of another one (for
2043 // method2). Type speculation could have more or different data in the
2044 // context of the compilation of method2, and it's worthwhile to retry an
2045 // optimization that failed for the compilation of method1 in the context
2046 // of the compilation of method2.
2047 // Space for SpeculativeTrapData entries is allocated from the extra
2048 // data space in the MDO. If we run out of space, the trap data for
2049 // the ProfileData at that bci is updated.
2050 class SpeculativeTrapData : public ProfileData {
2051 protected:
2052   enum {
2053     speculative_trap_method,
2054     speculative_trap_cell_count
2055   };
2056 public:
2057   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
2058     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
2059   }
2060 
2061   virtual bool is_SpeculativeTrapData() const { return true; }
2062 
2063   static int static_cell_count() {
2064     return speculative_trap_cell_count;
2065   }
2066 
2067   virtual int cell_count() const {
2068     return static_cell_count();
2069   }
2070 
2071   // Direct accessor
2072   Method* method() const {
2073     return (Method*)intptr_at(speculative_trap_method);
2074   }
2075 
2076   void set_method(Method* m) {
2077     assert(!m->is_old(), "cannot add old methods");
2078     set_intptr_at(speculative_trap_method, (intptr_t)m);
2079   }
2080 
2081   static ByteSize method_offset() {
2082     return cell_offset(speculative_trap_method);
2083   }
2084 
2085   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
2086 };
2087 
2088 // MethodData*
2089 //
2090 // A MethodData* holds information which has been collected about
2091 // a method.  Its layout looks like this:
2092 //
2093 // -----------------------------
2094 // | header                    |
2095 // | klass                     |
2096 // -----------------------------
2097 // | method                    |
2098 // | size of the MethodData* |
2099 // -----------------------------
2100 // | Data entries...           |
2101 // |   (variable size)         |
2102 // |                           |
2103 // .                           .
2104 // .                           .
2105 // .                           .
2106 // |                           |
2107 // -----------------------------
2108 //
2109 // The data entry area is a heterogeneous array of DataLayouts. Each
2110 // DataLayout in the array corresponds to a specific bytecode in the
2111 // method.  The entries in the array are sorted by the corresponding
2112 // bytecode.  Access to the data is via resource-allocated ProfileData objects,
2113 // which point to the underlying blocks of DataLayout structures.
2114 //
2115 // During interpretation, if profiling is enabled, the interpreter
2116 // maintains a method data pointer (mdp), which points at the entry
2117 // in the array corresponding to the current bci.  In the course of
2118 // interpretation, when a bytecode is encountered that has profile data
2119 // associated with it, the entry pointed to by mdp is updated, then the
2120 // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
2121 // is NULL to begin with, the interpreter assumes that the current method
2122 // is not (yet) being profiled.
2123 //
2124 // In MethodData* parlance, "dp" is a "data pointer", the actual address
2125 // of a DataLayout element.  A "di" is a "data index", the offset in bytes
2126 // from the base of the data entry array.  A "displacement" is a byte offset
2127 // stored in certain ProfileData objects that indicates the amount by which
2128 // the mdp must be adjusted in the event of a change in control flow.
2129 //
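// A small sketch of how these terms relate (illustrative only; see dp_to_di()
// and data_base() in the class below):
//
//   address dp = ...;                // address of some DataLayout in the data area
//   int     di = dp - data_base();   // its byte offset from the start of the area
//   // On a change in control flow the interpreter adds the recorded
//   // displacement to the mdp:  new_mdp = mdp + displacement;
//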
2130 
2131 CC_INTERP_ONLY(class BytecodeInterpreter;)
2132 class CleanExtraDataClosure;
2133 
2134 class MethodData : public Metadata {
2135   friend class VMStructs;
2136   friend class JVMCIVMStructs;
2137   CC_INTERP_ONLY(friend class BytecodeInterpreter;)
2138 private:
2139   friend class ProfileData;
2140   friend class TypeEntriesAtCall;
2141 
2142   // If you add a new field that points to any metaspace object, you
2143   // must add this field to MethodData::metaspace_pointers_do().
2144 
2145   // Back pointer to the Method*
2146   Method* _method;
2147 
2148   // Size of this MethodData in bytes
2149   int _size;
2150 
2151   // Cached hint for bci_to_dp and bci_to_data
2152   int _hint_di;
2153 
2154   Mutex _extra_data_lock;
2155 
2156   MethodData(const methodHandle& method, int size, TRAPS);
2157 public:
2158   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2159   MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
2160 
2161   bool is_methodData() const volatile { return true; }
2162   void initialize();
2163 
2164   // Whole-method sticky bits and flags
2165   enum {
2166     _trap_hist_limit    = 23 JVMCI_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
2167     _trap_hist_mask     = max_jubyte,
2168     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2169   }; // Public flag values
2170 private:
2171   uint _nof_decompiles;             // count of all nmethod removals
2172   uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2173   uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2174   union {
2175     intptr_t _align;
2176     u1 _array[JVMCI_ONLY(2 *) _trap_hist_limit];
2177   } _trap_hist;
2178 
2179   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2180   intx              _eflags;          // flags on escape information
2181   intx              _arg_local;       // bit set of non-escaping arguments
2182   intx              _arg_stack;       // bit set of stack-allocatable arguments
2183   intx              _arg_returned;    // bit set of returned arguments
2184 
2185   int _creation_mileage;              // method mileage at MDO creation
2186 
2187   // How many invocations has this MDO seen?
2188   // These counters are used to determine the exact age of the MDO.
2189   // We need them because with tiered compilation a method can be
2190   // concurrently executed at different levels.
2191   InvocationCounter _invocation_counter;
2192   // Same for backedges.
2193   InvocationCounter _backedge_counter;
2194   // Counter values at the time profiling started.
2195   int               _invocation_counter_start;
2196   int               _backedge_counter_start;
2197   uint              _tenure_traps;
2198   int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2199   int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2200 
2201 #if INCLUDE_RTM_OPT
2202   // State of RTM code generation during compilation of the method
2203   int               _rtm_state;
2204 #endif
2205 
2206   // The number of loops and blocks is computed the first time the method
2207   // is compiled with C1. It is used to determine whether the method is trivial.
2208   short             _num_loops;
2209   short             _num_blocks;
2210   // Does this method contain anything worth profiling?
2211   enum WouldProfile {unknown, no_profile, profile};
2212   WouldProfile      _would_profile;
2213 
2214 #if INCLUDE_JVMCI
2215   // Support for HotSpotMethodData.setCompiledIRSize(int)
2216   int               _jvmci_ir_size;
2217 #endif
2218 
2219   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2220   int _data_size;
2221 
2222   // data index of the area dedicated to parameters. no_parameters (-2)
2223   // if no parameter profiling is performed.
2224   enum { no_parameters = -2, parameters_uninitialized = -1 };
2225   int _parameters_type_data_di;
2226   int parameters_size_in_bytes() const {
2227     ParametersTypeData* param = parameters_type_data();
2228     return param == NULL ? 0 : param->size_in_bytes();
2229   }
2230 
2231   // Beginning of the data entries
2232   intptr_t _data[1];
2233 
2234   // Helper for size computation
2235   static int compute_data_size(BytecodeStream* stream);
2236   static int bytecode_cell_count(Bytecodes::Code code);
2237   static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2238   enum { no_profile_data = -1, variable_cell_count = -2 };
2239 
2240   // Helper for initialization
2241   DataLayout* data_layout_at(int data_index) const {
2242     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2243     return (DataLayout*) (((address)_data) + data_index);
2244   }
2245 
2246   // Initialize an individual data segment.  Returns the size of
2247   // the segment in bytes.
2248   int initialize_data(BytecodeStream* stream, int data_index);
2249 
2250   // Helper for data_at
2251   DataLayout* limit_data_position() const {
2252     return data_layout_at(_data_size);
2253   }
2254   bool out_of_bounds(int data_index) const {
2255     return data_index >= data_size();
2256   }
2257 
2258   // Give each of the data entries a chance to perform specific
2259   // data initialization.
2260   void post_initialize(BytecodeStream* stream);
2261 
2262   // hint accessors
2263   int      hint_di() const  { return _hint_di; }
2264   void set_hint_di(int di)  {
2265     assert(!out_of_bounds(di), "hint_di out of bounds");
2266     _hint_di = di;
2267   }
2268   ProfileData* data_before(int bci) {
2269     // avoid SEGV on this edge case
2270     if (data_size() == 0)
2271       return NULL;
2272     int hint = hint_di();
2273     if (data_layout_at(hint)->bci() <= bci)
2274       return data_at(hint);
2275     return first_data();
2276   }
2277 
2278   // What is the index of the first data entry?
2279   int first_di() const { return 0; }
2280 
2281   ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
2282   // Find or create an extra ProfileData:
2283   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2284 
2285   // return the argument info cell
2286   ArgInfoData *arg_info();
2287 
2288   enum {
2289     no_type_profile = 0,
2290     type_profile_jsr292 = 1,
2291     type_profile_all = 2
2292   };
2293 
2294   static bool profile_jsr292(const methodHandle& m, int bci);
2295   static bool profile_unsafe(const methodHandle& m, int bci);
2296   static int profile_arguments_flag();
2297   static bool profile_all_arguments();
2298   static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2299   static int profile_return_flag();
2300   static bool profile_all_return();
2301   static bool profile_return_for_invoke(const methodHandle& m, int bci);
2302   static int profile_parameters_flag();
2303   static bool profile_parameters_jsr292_only();
2304   static bool profile_all_parameters();
2305 
2306   void clean_extra_data(CleanExtraDataClosure* cl);
2307   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2308   void verify_extra_data_clean(CleanExtraDataClosure* cl);
2309 
2310 public:
2311   static int header_size() {
2312     return sizeof(MethodData)/wordSize;
2313   }
2314 
2315   // Compute the size of a MethodData* before it is created.
2316   static int compute_allocation_size_in_bytes(const methodHandle& method);
2317   static int compute_allocation_size_in_words(const methodHandle& method);
2318   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2319 
2320   // Determine if a given bytecode can have profile information.
2321   static bool bytecode_has_profile(Bytecodes::Code code) {
2322     return bytecode_cell_count(code) != no_profile_data;
2323   }
2324 
2325   // reset into original state
2326   void init();
2327 
2328   // My size
2329   int size_in_bytes() const { return _size; }
2330   int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
2331 #if INCLUDE_SERVICES
2332   void collect_statistics(KlassSizeStats *sz) const;
2333 #endif
2334 
2335   int      creation_mileage() const  { return _creation_mileage; }
2336   void set_creation_mileage(int x)   { _creation_mileage = x; }
2337 
2338   int invocation_count() {
2339     if (invocation_counter()->carry()) {
2340       return InvocationCounter::count_limit;
2341     }
2342     return invocation_counter()->count();
2343   }
2344   int backedge_count() {
2345     if (backedge_counter()->carry()) {
2346       return InvocationCounter::count_limit;
2347     }
2348     return backedge_counter()->count();
2349   }
2350 
2351   int invocation_count_start() {
2352     if (invocation_counter()->carry()) {
2353       return 0;
2354     }
2355     return _invocation_counter_start;
2356   }
2357 
2358   int backedge_count_start() {
2359     if (backedge_counter()->carry()) {
2360       return 0;
2361     }
2362     return _backedge_counter_start;
2363   }
2364 
2365   int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2366   int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2367 
2368   void reset_start_counters() {
2369     _invocation_counter_start = invocation_count();
2370     _backedge_counter_start = backedge_count();
2371   }
2372 
2373   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2374   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2375 
2376 #if INCLUDE_RTM_OPT
2377   int rtm_state() const {
2378     return _rtm_state;
2379   }
2380   void set_rtm_state(RTMState rstate) {
2381     _rtm_state = (int)rstate;
2382   }
2383   void atomic_set_rtm_state(RTMState rstate) {
2384     Atomic::store((int)rstate, &_rtm_state);
2385   }
2386 
2387   static int rtm_state_offset_in_bytes() {
2388     return offset_of(MethodData, _rtm_state);
2389   }
2390 #endif
2391 
2392   void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2393   bool would_profile() const                  { return _would_profile != no_profile; }
2394 
2395   int num_loops() const                       { return _num_loops;  }
2396   void set_num_loops(int n)                   { _num_loops = n;     }
2397   int num_blocks() const                      { return _num_blocks; }
2398   void set_num_blocks(int n)                  { _num_blocks = n;    }
2399 
2400   bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
2401   static int mileage_of(Method* m);
2402 
2403   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2404   enum EscapeFlag {
2405     estimated    = 1 << 0,
2406     return_local = 1 << 1,
2407     return_allocated = 1 << 2,
2408     allocated_escapes = 1 << 3,
2409     unknown_modified = 1 << 4
2410   };
2411 
2412   intx eflags()                                  { return _eflags; }
2413   intx arg_local()                               { return _arg_local; }
2414   intx arg_stack()                               { return _arg_stack; }
2415   intx arg_returned()                            { return _arg_returned; }
2416   uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
2417                                                    assert(aid != NULL, "arg_info must be not null");
2418                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2419                                                    return aid->arg_modified(a); }
2420 
2421   void set_eflags(intx v)                        { _eflags = v; }
2422   void set_arg_local(intx v)                     { _arg_local = v; }
2423   void set_arg_stack(intx v)                     { _arg_stack = v; }
2424   void set_arg_returned(intx v)                  { _arg_returned = v; }
2425   void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
2426                                                    assert(aid != NULL, "arg_info must be not null");
2427                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2428                                                    aid->set_arg_modified(a, v); }
2429 
2430   void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2431 
2432   // Location and size of data area
2433   address data_base() const {
2434     return (address) _data;
2435   }
2436   int data_size() const {
2437     return _data_size;
2438   }
2439 
2440   // Accessors
2441   Method* method() const { return _method; }
2442 
2443   // Get the data at an arbitrary (sort of) data index.
2444   ProfileData* data_at(int data_index) const;
2445 
2446   // Walk through the data in order.
2447   ProfileData* first_data() const { return data_at(first_di()); }
2448   ProfileData* next_data(ProfileData* current) const;
2449   bool is_valid(ProfileData* current) const { return current != NULL; }
2450 
2451   // Convert a dp (data pointer) to a di (data index).
2452   int dp_to_di(address dp) const {
2453     return dp - ((address)_data);
2454   }
2455 
2456   // bci to di/dp conversion.
2457   address bci_to_dp(int bci);
2458   int bci_to_di(int bci) {
2459     return dp_to_di(bci_to_dp(bci));
2460   }
2461 
2462   // Get the data at an arbitrary bci, or NULL if there is none.
2463   ProfileData* bci_to_data(int bci);
2464 
2465   // Same, but try to create an extra_data record if one is needed:
2466   ProfileData* allocate_bci_to_data(int bci, Method* m) {
2467     ProfileData* data = NULL;
2468     // If m is not NULL, try to allocate a SpeculativeTrapData entry first
2469     if (m == NULL) {
2470       data = bci_to_data(bci);
2471     }
2472     if (data != NULL) {
2473       return data;
2474     }
2475     data = bci_to_extra_data(bci, m, true);
2476     if (data != NULL) {
2477       return data;
2478     }
2479     // If SpeculativeTrapData allocation fails try to allocate a
2480     // regular entry
2481     data = bci_to_data(bci);
2482     if (data != NULL) {
2483       return data;
2484     }
2485     return bci_to_extra_data(bci, NULL, true);
2486   }
2487 
2488   // Add a handful of extra data records, for trap tracking.
2489   DataLayout* extra_data_base() const  { return limit_data_position(); }
2490   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2491   DataLayout* args_data_limit() const  { return (DataLayout*)((address)this + size_in_bytes() -
2492                                                               parameters_size_in_bytes()); }
2493   int extra_data_size() const          { return (address)extra_data_limit() - (address)extra_data_base(); }
2494   static DataLayout* next_extra(DataLayout* dp);
2495 
2496   // Return (uint)-1 for overflow.
2497   uint trap_count(int reason) const {
2498     assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
2499     return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2500   }
2501   // For use as loop bounds when iterating over the trap history:
2502   static uint trap_reason_limit() { return _trap_hist_limit; }
2503   static uint trap_count_limit()  { return _trap_hist_mask; }
2504   uint inc_trap_count(int reason) {
2505     // Count another trap, anywhere in this method.
2506     assert(reason >= 0, "must be single trap");
2507     assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
2508     uint cnt1 = 1 + _trap_hist._array[reason];
2509     if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2510       _trap_hist._array[reason] = cnt1;
2511       return cnt1;
2512     } else {
2513       return _trap_hist_mask + (++_nof_overflow_traps);
2514     }
2515   }
2516 
2517   uint overflow_trap_count() const {
2518     return _nof_overflow_traps;
2519   }
2520   uint overflow_recompile_count() const {
2521     return _nof_overflow_recompiles;
2522   }
2523   void inc_overflow_recompile_count() {
2524     _nof_overflow_recompiles += 1;
2525   }
2526   uint decompile_count() const {
2527     return _nof_decompiles;
2528   }
2529   void inc_decompile_count() {
2530     _nof_decompiles += 1;
2531     if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
2532       method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
2533     }
2534   }
2535   uint tenure_traps() const {
2536     return _tenure_traps;
2537   }
2538   void inc_tenure_traps() {
2539     _tenure_traps += 1;
2540   }
2541 
2542   // Return pointer to area dedicated to parameters in MDO
2543   ParametersTypeData* parameters_type_data() const {
2544     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2545     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
2546   }
2547 
2548   int parameters_type_data_di() const {
2549     assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2550     return _parameters_type_data_di;
2551   }
2552 
2553   // Support for code generation
2554   static ByteSize data_offset() {
2555     return byte_offset_of(MethodData, _data[0]);
2556   }
2557 
2558   static ByteSize trap_history_offset() {
2559     return byte_offset_of(MethodData, _trap_hist._array);
2560   }
2561 
2562   static ByteSize invocation_counter_offset() {
2563     return byte_offset_of(MethodData, _invocation_counter);
2564   }
2565 
2566   static ByteSize backedge_counter_offset() {
2567     return byte_offset_of(MethodData, _backedge_counter);
2568   }
2569 
2570   static ByteSize invoke_mask_offset() {
2571     return byte_offset_of(MethodData, _invoke_mask);
2572   }
2573 
2574   static ByteSize backedge_mask_offset() {
2575     return byte_offset_of(MethodData, _backedge_mask);
2576   }
2577 
2578   static ByteSize parameters_type_data_di_offset() {
2579     return byte_offset_of(MethodData, _parameters_type_data_di);
2580   }
2581 
2582   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
2583   virtual MetaspaceObj::Type type() const { return MethodDataType; }
2584 
2585   // Deallocation support - no pointer fields to deallocate
2586   void deallocate_contents(ClassLoaderData* loader_data) {}
2587 
2588   // GC support
2589   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2590 
2591   // Printing
2592   void print_on      (outputStream* st) const;
2593   void print_value_on(outputStream* st) const;
2594 
2595   // printing support for method data
2596   void print_data_on(outputStream* st) const;
2597 
2598   const char* internal_name() const { return "{method data}"; }
2599 
2600   // verification
2601   void verify_on(outputStream* st);
2602   void verify_data_on(outputStream* st);
2603 
2604   static bool profile_parameters_for_method(const methodHandle& m);
2605   static bool profile_arguments();
2606   static bool profile_arguments_jsr292_only();
2607   static bool profile_return();
2608   static bool profile_parameters();
2609   static bool profile_return_jsr292_only();
2610 
2611   void clean_method_data(BoolObjectClosure* is_alive);
2612   void clean_weak_method_links();
2613   DEBUG_ONLY(void verify_clean_weak_method_links();)
2614   Mutex* extra_data_lock() { return &_extra_data_lock; }
2615 };
2616 
2617 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP