
src/hotspot/share/oops/methodData.hpp (original version)

  66 // and the lattice quickly "bottoms out" in a state where all counters
  67 // are taken to be indefinitely large.
  68 //
  69 // The reader will find many data races in profile gathering code, starting
  70 // with invocation counter incrementation.  None of these races harm correct
  71 // execution of the compiled code.
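A minimal sketch of the benign race described above (illustrative only, not part of this file; the type and field names are made up):

    // Two threads may both read 10 and both store 11: one increment is
    // lost, but the profile only becomes slightly less precise, which
    // is harmless to the compiled code that consumes it.
    #include <cstdint>

    struct InvocationCounterSketch {
      uint32_t _count = 0;        // plain non-atomic field, as in the MDO

      void racy_increment() {
        uint32_t c = _count;      // unsynchronized read
        _count = c + 1;           // unsynchronized write
      }
    };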
  72 
  73 // forward decl
  74 class ProfileData;
  75 
  76 // DataLayout
  77 //
  78 // Overlay for generic profiling data.
  79 class DataLayout {
  80   friend class VMStructs;
  81   friend class JVMCIVMStructs;
  82 
  83 private:
  84   // Every data layout begins with a header.  This header
  85   // contains a tag, which is used to indicate the size/layout
  86   // of the data, 4 bits of flags, which can be used in any way,
  87   // 4 bits of trap history (none/one reason/many reasons),
  88   // and a bci, which is used to tie this piece of data to a
  89   // specific bci in the bytecodes.
  90   union {
  91     intptr_t _bits;
  92     struct {
  93       u1 _tag;
  94       u1 _flags;
  95       u2 _bci;
  96     } _struct;
  97   } _header;
  98 
  99   // The data layout has an arbitrary number of cells, each sized
  100   // to accommodate a pointer or an integer.
 101   intptr_t _cells[1];
 102 
 103   // Some types of data layouts need a length field.
 104   static bool needs_array_len(u1 tag);
 105 
 106 public:
 107   enum {
 108     counter_increment = 1
 109   };
 110 
 111   enum {
 112     cell_size = sizeof(intptr_t)
 113   };
 114 
 115   // Tag values
 116   enum {
 117     no_tag,
 118     bit_data_tag,
 119     counter_data_tag,
 120     jump_data_tag,
 121     receiver_type_data_tag,
 122     virtual_call_data_tag,
 123     ret_data_tag,
 124     branch_data_tag,
 125     multi_branch_data_tag,
 126     arg_info_data_tag,
 127     call_type_data_tag,
 128     virtual_call_type_data_tag,
 129     parameters_type_data_tag,
 130     speculative_trap_data_tag
 131   };
 132 
 133   enum {
 134     // The _struct._flags word is formatted as [trap_state:4 | flags:4].
 135     // The trap state breaks down further as [recompile:1 | reason:3].
 136     // This further breakdown is defined in deoptimization.cpp.
 137     // See Deoptimization::trap_state_reason for an assert that
 138     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 139     //
 140     // The trap_state is collected only if ProfileTraps is true.
 141     trap_bits = 1+3,  // 3: enough to distinguish [0..Reason_RECORDED_LIMIT].
 142     trap_shift = BitsPerByte - trap_bits,
 143     trap_mask = right_n_bits(trap_bits),
 144     trap_mask_in_place = (trap_mask << trap_shift),
 145     flag_limit = trap_shift,
 146     flag_mask = right_n_bits(flag_limit),
 147     first_flag = 0
 148   };
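As a worked example (a sketch, not part of the patch), with BitsPerByte == 8 the constants above evaluate as follows; right_n_bits(n) is spelled out here as (1 << n) - 1:

    enum {
      trap_bits          = 1 + 3,                    // 4
      trap_shift         = 8 - trap_bits,            // 4
      trap_mask          = (1 << trap_bits) - 1,     // 0x0F
      trap_mask_in_place = trap_mask << trap_shift,  // 0xF0: high nibble of _flags
      flag_limit         = trap_shift,               // 4 flag bits remain
      flag_mask          = (1 << flag_limit) - 1     // 0x0F: low nibble of _flags
    };
    static_assert((trap_mask_in_place & flag_mask) == 0,
                  "trap state and flags share the byte but do not overlap");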
 149 
 150   // Size computation
 151   static int header_size_in_bytes() {
 152     return cell_size;
 153   }
 154   static int header_size_in_cells() {
 155     return 1;
 156   }
 157 
 158   static int compute_size_in_bytes(int cell_count) {
 159     return header_size_in_bytes() + cell_count * cell_size;
 160   }
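A quick sizing example (sketch, assuming an LP64 target where sizeof(intptr_t) == 8): the header is one cell, so a layout with two data cells occupies 8 + 2 * 8 = 24 bytes.

    // Sketch: DataLayout sizing on LP64.
    constexpr int cell_size     = 8;                             // sizeof(intptr_t)
    constexpr int header_bytes  = cell_size;                     // 1 header cell
    constexpr int two_cell_size = header_bytes + 2 * cell_size;  // 24 bytes
    static_assert(two_cell_size == 24, "header + 2 cells");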
 161 
 162   // Initialization
 163   void initialize(u1 tag, u2 bci, int cell_count);
 164 
 165   // Accessors
 166   u1 tag() {
 167     return _header._struct._tag;
 168   }
 169 
 170   // Return a few bits of trap state.  Range is [0..trap_mask].
 171   // The state tells if traps with zero, one, or many reasons have occurred.
 172   // It also tells whether zero or many recompilations have occurred.
 173   // The associated trap histogram in the MDO itself tells whether
 174   // traps are common or not.  If a BCI shows that a trap X has
 175   // occurred, and the MDO shows N occurrences of X, we make the
 176   // simplifying assumption that all N occurrences can be blamed
 177   // on that BCI.
 178   int trap_state() const {
 179     return ((_header._struct._flags >> trap_shift) & trap_mask);
 180   }
 181 
 182   void set_trap_state(int new_state) {
 183     assert(ProfileTraps, "used only under +ProfileTraps");
 184     uint old_flags = (_header._struct._flags & flag_mask);
 185     _header._struct._flags = (new_state << trap_shift) | old_flags;
 186   }
 187 
 188   u1 flags() const {
 189     return _header._struct._flags;
 190   }
 191 
 192   u2 bci() const {
 193     return _header._struct._bci;
 194   }
 195 
 196   void set_header(intptr_t value) {
 197     _header._bits = value;
 198   }
 199   intptr_t header() {
 200     return _header._bits;
 201   }
 202   void set_cell_at(int index, intptr_t value) {
 203     _cells[index] = value;
 204   }
 205   void release_set_cell_at(int index, intptr_t value);
 206   intptr_t cell_at(int index) const {
 207     return _cells[index];
 208   }
 209 
 210   void set_flag_at(int flag_number) {
 211     assert(flag_number < flag_limit, "oob");
 212     _header._struct._flags |= (0x1 << flag_number);
 213   }
 214   bool flag_at(int flag_number) const {
 215     assert(flag_number < flag_limit, "oob");
 216     return (_header._struct._flags & (0x1 << flag_number)) != 0;
 217   }
 218 
 219   // Low-level support for code generation.
 220   static ByteSize header_offset() {
 221     return byte_offset_of(DataLayout, _header);
 222   }
 223   static ByteSize tag_offset() {
 224     return byte_offset_of(DataLayout, _header._struct._tag);
 225   }
 226   static ByteSize flags_offset() {
 227     return byte_offset_of(DataLayout, _header._struct._flags);
 228   }
 229   static ByteSize bci_offset() {
 230     return byte_offset_of(DataLayout, _header._struct._bci);
 231   }
 232   static ByteSize cell_offset(int index) {
 233     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 234   }
 235 #ifdef CC_INTERP
 236   static int cell_offset_in_bytes(int index) {
 237     return (int)offset_of(DataLayout, _cells[index]);
 238   }
 239 #endif // CC_INTERP
 240   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 241   static int flag_number_to_byte_constant(int flag_number) {
 242     assert(0 <= flag_number && flag_number < flag_limit, "oob");
 243     DataLayout temp; temp.set_header(0);
 244     temp.set_flag_at(flag_number);
 245     return temp._header._struct._flags;
 246   }
 247   // Return a value which, when or-ed as a word into _header, sets the flag.
 248   static intptr_t flag_mask_to_header_mask(int byte_constant) {
 249     DataLayout temp; temp.set_header(0);
 250     temp._header._struct._flags = byte_constant;
 251     return temp._header._bits;
 252   }
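To make the two helpers concrete (sketch, assuming a little-endian target, where _flags is byte 1 of the header word): flag_number_to_byte_constant(n) is 1 << n as a byte value, and flag_mask_to_header_mask() re-positions that byte at the offset of _flags within _bits, so generated code can set a flag with a single word-sized OR.

    // Sketch: what the helpers compute on a little-endian LP64 target.
    int      byte_constant = 1 << 1;                       // flag_number_to_byte_constant(1) == 0x02
    intptr_t header_mask   = (intptr_t)byte_constant << 8; // flag_mask_to_header_mask(0x02)
    // or-ing header_mask into _header._bits sets flag 1 without touching _tag or _bci.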
 253 
 254   ProfileData* data_in();
 255 
 256   // GC support
 257   void clean_weak_klass_links(bool always_clean);
 258 
 259   // Redefinition support
 260   void clean_weak_method_links();
 261   DEBUG_ONLY(void verify_clean_weak_method_links();)
 262 };
 263 
 264 
 265 // ProfileData class hierarchy
 266 class ProfileData;
 267 class   BitData;
 268 class     CounterData;


 347     return (int)data()->cell_at(index);
 348   }
 349   void set_oop_at(int index, oop value) {
 350     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 351   }
 352   oop oop_at(int index) const {
 353     return cast_to_oop(intptr_at(index));
 354   }
 355 
 356   void set_flag_at(int flag_number) {
 357     data()->set_flag_at(flag_number);
 358   }
 359   bool flag_at(int flag_number) const {
 360     return data()->flag_at(flag_number);
 361   }
 362 
 363   // two convenient imports for use by subclasses:
 364   static ByteSize cell_offset(int index) {
 365     return DataLayout::cell_offset(index);
 366   }
 367   static int flag_number_to_byte_constant(int flag_number) {
 368     return DataLayout::flag_number_to_byte_constant(flag_number);
 369   }
 370 
 371   ProfileData(DataLayout* data) {
 372     _data = data;
 373   }
 374 
 375 #ifdef CC_INTERP
 376   // Static low level accessors for DataLayout with ProfileData's semantics.
 377 
 378   static int cell_offset_in_bytes(int index) {
 379     return DataLayout::cell_offset_in_bytes(index);
 380   }
 381 
 382   static void increment_uint_at_no_overflow(DataLayout* layout, int index,
 383                                             int inc = DataLayout::counter_increment) {
 384     uint count = ((uint)layout->cell_at(index)) + inc;
 385     if (count == 0) return;
 386     layout->set_cell_at(index, (intptr_t) count);
 387   }
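Note the saturating behavior: with inc == counter_increment, a counter at UINT_MAX wraps to zero on the next bump, the early return skips the store, and the counter stays pinned at its maximum ("indefinitely large", as the file header puts it). An isolated sketch:

    // Sketch of the saturation, outside DataLayout.
    #include <cstdint>
    #include <cassert>

    static uint32_t bump_no_overflow(uint32_t count, uint32_t inc = 1) {
      uint32_t next = count + inc;
      if (next == 0) return count;   // wrapped: keep the maximal value
      return next;
    }

    int main() {
      assert(bump_no_overflow(41) == 42);
      assert(bump_no_overflow(0xFFFFFFFFu) == 0xFFFFFFFFu);  // pinned
      return 0;
    }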
 388 


 557 
 558   virtual int cell_count() const {
 559     return static_cell_count();
 560   }
 561 
 562   // Accessor
 563 
 564   // The null_seen flag bit is specially known to the interpreter.
 565   // Consulting it allows the compiler to avoid setting up null_check traps.
 566   bool null_seen()     { return flag_at(null_seen_flag); }
 567   void set_null_seen()    { set_flag_at(null_seen_flag); }
 568 
 569 #if INCLUDE_JVMCI
 570   // true if an exception was thrown at the specific BCI
 571   bool exception_seen() { return flag_at(exception_seen_flag); }
 572   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 573 #endif
 574 
 575   // Code generation support
 576   static int null_seen_byte_constant() {
 577     return flag_number_to_byte_constant(null_seen_flag);
 578   }
 579 
 580   static ByteSize bit_data_size() {
 581     return cell_offset(bit_cell_count);
 582   }
 583 
 584 #ifdef CC_INTERP
 585   static int bit_data_size_in_bytes() {
 586     return cell_offset_in_bytes(bit_cell_count);
 587   }
 588 
 589   static void set_null_seen(DataLayout* layout) {
 590     set_flag_at(layout, null_seen_flag);
 591   }
 592 
 593   static DataLayout* advance(DataLayout* layout) {
 594     return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
 595   }
 596 #endif // CC_INTERP
 597 


2033   }
2034 };
2035 
2036 // SpeculativeTrapData
2037 //
2038 // A SpeculativeTrapData is used to record traps due to type
2039 // speculation. It records the root of the compilation: that type
2040 // speculation is wrong in the context of one compilation (for
2041 // method1) doesn't mean it's wrong in the context of another one (for
2042 // method2). Type speculation could have more/different data in the
2043 // context of the compilation of method2 and it's worthwhile to try an
2044 // optimization that failed for compilation of method1 in the context
2045 // of compilation of method2.
2046 // Space for SpeculativeTrapData entries is allocated from the extra
2047 // data space in the MDO. If we run out of space, the trap data for
2048 // the ProfileData at that bci is updated.
2049 class SpeculativeTrapData : public ProfileData {
2050 protected:
2051   enum {
2052     speculative_trap_method,
2053     speculative_trap_cell_count
2054   };
2055 public:
2056   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
2057     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
2058   }
2059 
2060   virtual bool is_SpeculativeTrapData() const { return true; }
2061 
2062   static int static_cell_count() {
2063     return speculative_trap_cell_count;
2064   }
2065 
2066   virtual int cell_count() const {
2067     return static_cell_count();
2068   }
2069 
2070   // Direct accessor
2071   Method* method() const {
2072     return (Method*)intptr_at(speculative_trap_method);


2145   Method* _method;
2146 
2147   // Size of this oop in bytes
2148   int _size;
2149 
2150   // Cached hint for bci_to_dp and bci_to_data
2151   int _hint_di;
2152 
2153   Mutex _extra_data_lock;
2154 
2155   MethodData(const methodHandle& method, int size, TRAPS);
2156 public:
2157   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2158   MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
2159 
2160   bool is_methodData() const volatile { return true; }
2161   void initialize();
2162 
2163   // Whole-method sticky bits and flags
2164   enum {
2165     _trap_hist_limit    = 23 JVMCI_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
2166     _trap_hist_mask     = max_jubyte,
2167     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2168   }; // Public flag values
2169 private:
2170   uint _nof_decompiles;             // count of all nmethod removals
2171   uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2172   uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2173   union {
2174     intptr_t _align;
2175     u1 _array[JVMCI_ONLY(2 *) _trap_hist_limit];
2176   } _trap_hist;
2177 
2178   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2179   intx              _eflags;          // flags on escape information
2180   intx              _arg_local;       // bit set of non-escaping arguments
2181   intx              _arg_stack;       // bit set of stack-allocatable arguments
2182   intx              _arg_returned;    // bit set of returned arguments
2183 
2184   int _creation_mileage;              // method mileage at MDO creation
2185 


src/hotspot/share/oops/methodData.hpp (patched version)

  66 // and the lattice quickly "bottoms out" in a state where all counters
  67 // are taken to be indefinitely large.
  68 //
  69 // The reader will find many data races in profile gathering code, starting
  70 // with invocation counter incrementation.  None of these races harm correct
  71 // execution of the compiled code.
  72 
  73 // forward decl
  74 class ProfileData;
  75 
  76 // DataLayout
  77 //
  78 // Overlay for generic profiling data.
  79 class DataLayout {
  80   friend class VMStructs;
  81   friend class JVMCIVMStructs;
  82 
  83 private:
  84   // Every data layout begins with a header.  This header
  85   // contains a tag, which is used to indicate the size/layout
  86   // of the data, 8 bits of flags, which can be used in any way,
  87   // 32 bits of trap history (none/one reason/many reasons),
  88   // and a bci, which is used to tie this piece of data to a
  89   // specific bci in the bytecodes.
  90   union {
  91     u8 _bits;
  92     struct {
  93       u1 _tag;
  94       u1 _flags;
  95       u2 _bci;
  96       u4 _traps;
  97     } _struct;
  98   } _header;
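A sketch of how the patched 64-bit header packs on a little-endian target (illustrative helpers, not part of the patch): byte 0 holds the tag, byte 1 the flags, bytes 2-3 the bci, and bytes 4-7 the new 32-bit trap history.

    #include <cstdint>

    static inline uint8_t  header_tag(uint64_t bits)   { return (uint8_t)  bits;        }
    static inline uint8_t  header_flags(uint64_t bits) { return (uint8_t) (bits >> 8);  }
    static inline uint16_t header_bci(uint64_t bits)   { return (uint16_t)(bits >> 16); }
    static inline uint32_t header_traps(uint64_t bits) { return (uint32_t)(bits >> 32); }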
  99 
 100   // The data layout has an arbitrary number of cells, each sized
  101   // to accommodate a pointer or an integer.
 102   intptr_t _cells[1];
 103 
 104   // Some types of data layouts need a length field.
 105   static bool needs_array_len(u1 tag);
 106 
 107 public:
 108   enum {
 109     counter_increment = 1
 110   };
 111 
 112   enum {
 113     cell_size = sizeof(intptr_t)
 114   };
 115 
 116   // Tag values
 117   enum {
 118     no_tag,
 119     bit_data_tag,
 120     counter_data_tag,
 121     jump_data_tag,
 122     receiver_type_data_tag,
 123     virtual_call_data_tag,
 124     ret_data_tag,
 125     branch_data_tag,
 126     multi_branch_data_tag,
 127     arg_info_data_tag,
 128     call_type_data_tag,
 129     virtual_call_type_data_tag,
 130     parameters_type_data_tag,
 131     speculative_trap_data_tag
 132   };
 133 
 134   enum {
 135     // The trap state breaks down as [recompile:1 | reason:31].
 136     // This further breakdown is defined in deoptimization.cpp.
 137     // See Deoptimization::trap_state_reason for an assert that
 138     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 139     //
 140     // The trap_state is collected only if ProfileTraps is true.
 141     trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
 142     trap_mask = right_n_bits(trap_bits),
 143     first_flag = 0
 144   };
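With the trap state widened to its own 32-bit _traps field, no shift or in-place mask against the flags byte is needed any more; per the comment above, the state still decomposes as [recompile:1 | reason:31]. A decoding sketch (helper names hypothetical):

    #include <cstdint>

    static inline bool     ts_recompiled(uint32_t state) { return (state >> 31) != 0;  }
    static inline uint32_t ts_reason(uint32_t state)     { return state & 0x7FFFFFFFu; }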
 145 
 146   // Size computation
 147   static int header_size_in_bytes() {
 148     return header_size_in_cells() * cell_size;
 149   }
 150   static int header_size_in_cells() {
 151     return LP64_ONLY(1) NOT_LP64(2);
 152   }
 153 
 154   static int compute_size_in_bytes(int cell_count) {
 155     return header_size_in_bytes() + cell_count * cell_size;
 156   }
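Worked out (sketch): the header is now a fixed 8 bytes (a u8), so it spans one 8-byte cell on LP64 but two 4-byte cells on 32-bit targets, and compute_size_in_bytes() stays correct on both.

    // Sketch: header sizing under the patched 64-bit header.
    constexpr int header_bytes(int cell_size) {
      return (cell_size == 8 ? 1 : 2) * cell_size;   // 8 bytes either way
    }
    static_assert(header_bytes(8) == 8 && header_bytes(4) == 8, "u8 header");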
 157 
 158   // Initialization
 159   void initialize(u1 tag, u2 bci, int cell_count);
 160 
 161   // Accessors
 162   u1 tag() {
 163     return _header._struct._tag;
 164   }
 165 
 166   // Return 32 bits of trap state.
 167   // The state tells if traps with zero, one, or many reasons have occurred.
 168   // It also tells whether zero or many recompilations have occurred.
 169   // The associated trap histogram in the MDO itself tells whether
 170   // traps are common or not.  If a BCI shows that a trap X has
 171   // occurred, and the MDO shows N occurrences of X, we make the
 172   // simplifying assumption that all N occurrences can be blamed
 173   // on that BCI.
 174   uint trap_state() const {
 175     return _header._struct._traps;
 176   }
 177 
 178   void set_trap_state(uint new_state) {
 179     assert(ProfileTraps, "used only under +ProfileTraps");
 180     uint old_flags = _header._struct._traps;
 181     _header._struct._traps = new_state | old_flags;
 182   }
 183 
 184   u1 flags() const {
 185     return _header._struct._flags;
 186   }
 187 
 188   u2 bci() const {
 189     return _header._struct._bci;
 190   }
 191 
 192   void set_header(u8 value) {
 193     _header._bits = value;
 194   }
 195   u8 header() {
 196     return _header._bits;
 197   }
 198   void set_cell_at(int index, intptr_t value) {
 199     _cells[index] = value;
 200   }
 201   void release_set_cell_at(int index, intptr_t value);
 202   intptr_t cell_at(int index) const {
 203     return _cells[index];
 204   }
 205 
 206   void set_flag_at(u1 flag_number) {
 207     _header._struct._flags |= (0x1 << flag_number);
 208   }
 209   bool flag_at(u1 flag_number) const {
 210     return (_header._struct._flags & (0x1 << flag_number)) != 0;
 211   }
 212 
 213   // Low-level support for code generation.
 214   static ByteSize header_offset() {
 215     return byte_offset_of(DataLayout, _header);
 216   }
 217   static ByteSize tag_offset() {
 218     return byte_offset_of(DataLayout, _header._struct._tag);
 219   }
 220   static ByteSize flags_offset() {
 221     return byte_offset_of(DataLayout, _header._struct._flags);
 222   }
 223   static ByteSize bci_offset() {
 224     return byte_offset_of(DataLayout, _header._struct._bci);
 225   }
 226   static ByteSize cell_offset(int index) {
 227     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 228   }
 229 #ifdef CC_INTERP
 230   static int cell_offset_in_bytes(int index) {
 231     return (int)offset_of(DataLayout, _cells[index]);
 232   }
 233 #endif // CC_INTERP
 234   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 235   static u1 flag_number_to_constant(u1 flag_number) {
 236     DataLayout temp; temp.set_header(0);
 237     temp.set_flag_at(flag_number);
 238     return temp._header._struct._flags;
 239   }
 240   // Return a value which, when or-ed as a word into _header, sets the flag.
 241   static u8 flag_mask_to_header_mask(uint byte_constant) {
 242     DataLayout temp; temp.set_header(0);
 243     temp._header._struct._flags = byte_constant;
 244     return temp._header._bits;
 245   }
 246 
 247   ProfileData* data_in();
 248 
 249   // GC support
 250   void clean_weak_klass_links(bool always_clean);
 251 
 252   // Redefinition support
 253   void clean_weak_method_links();
 254   DEBUG_ONLY(void verify_clean_weak_method_links();)
 255 };
 256 
 257 
 258 // ProfileData class hierarchy
 259 class ProfileData;
 260 class   BitData;
 261 class     CounterData;


 340     return (int)data()->cell_at(index);
 341   }
 342   void set_oop_at(int index, oop value) {
 343     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 344   }
 345   oop oop_at(int index) const {
 346     return cast_to_oop(intptr_at(index));
 347   }
 348 
 349   void set_flag_at(int flag_number) {
 350     data()->set_flag_at(flag_number);
 351   }
 352   bool flag_at(int flag_number) const {
 353     return data()->flag_at(flag_number);
 354   }
 355 
 356   // two convenient imports for use by subclasses:
 357   static ByteSize cell_offset(int index) {
 358     return DataLayout::cell_offset(index);
 359   }
 360   static int flag_number_to_constant(int flag_number) {
 361     return DataLayout::flag_number_to_constant(flag_number);
 362   }
 363 
 364   ProfileData(DataLayout* data) {
 365     _data = data;
 366   }
 367 
 368 #ifdef CC_INTERP
 369   // Static low level accessors for DataLayout with ProfileData's semantics.
 370 
 371   static int cell_offset_in_bytes(int index) {
 372     return DataLayout::cell_offset_in_bytes(index);
 373   }
 374 
 375   static void increment_uint_at_no_overflow(DataLayout* layout, int index,
 376                                             int inc = DataLayout::counter_increment) {
 377     uint count = ((uint)layout->cell_at(index)) + inc;
 378     if (count == 0) return;
 379     layout->set_cell_at(index, (intptr_t) count);
 380   }
 381 


 550 
 551   virtual int cell_count() const {
 552     return static_cell_count();
 553   }
 554 
 555   // Accessor
 556 
 557   // The null_seen flag bit is specially known to the interpreter.
 558   // Consulting it allows the compiler to avoid setting up null_check traps.
 559   bool null_seen()     { return flag_at(null_seen_flag); }
 560   void set_null_seen()    { set_flag_at(null_seen_flag); }
 561 
 562 #if INCLUDE_JVMCI
 563   // true if an exception was thrown at the specific BCI
 564   bool exception_seen() { return flag_at(exception_seen_flag); }
 565   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 566 #endif
 567 
 568   // Code generation support
 569   static int null_seen_byte_constant() {
 570     return flag_number_to_constant(null_seen_flag);
 571   }
 572 
 573   static ByteSize bit_data_size() {
 574     return cell_offset(bit_cell_count);
 575   }
 576 
 577 #ifdef CC_INTERP
 578   static int bit_data_size_in_bytes() {
 579     return cell_offset_in_bytes(bit_cell_count);
 580   }
 581 
 582   static void set_null_seen(DataLayout* layout) {
 583     set_flag_at(layout, null_seen_flag);
 584   }
 585 
 586   static DataLayout* advance(DataLayout* layout) {
 587     return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
 588   }
 589 #endif // CC_INTERP
 590 


2026   }
2027 };
2028 
2029 // SpeculativeTrapData
2030 //
2031 // A SpeculativeTrapData is used to record traps due to type
2032 // speculation. It records the root of the compilation: that type
2033 // speculation is wrong in the context of one compilation (for
2034 // method1) doesn't mean it's wrong in the context of another one (for
2035 // method2). Type speculation could have more/different data in the
2036 // context of the compilation of method2 and it's worthwhile to try an
2037 // optimization that failed for compilation of method1 in the context
2038 // of compilation of method2.
2039 // Space for SpeculativeTrapData entries is allocated from the extra
2040 // data space in the MDO. If we run out of space, the trap data for
2041 // the ProfileData at that bci is updated.
2042 class SpeculativeTrapData : public ProfileData {
2043 protected:
2044   enum {
2045     speculative_trap_method,
2046 #ifndef _LP64
2047     speculative_trap_padding,
2048 #endif
2049     speculative_trap_cell_count
2050   };
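The padding cell added for 32-bit targets keeps the overall entry size uniform: the header is two 4-byte cells on ILP32 but one 8-byte cell on LP64, so with the pad both layouts come to 16 bytes. A sketch of the cell layout, derived from the sizes defined above:

    // ILP32: [ header: 2 cells = 8B ][ Method*: 1 cell ][ pad: 1 cell ]  -> 16 bytes
    // LP64:  [ header: 1 cell  = 8B ][ Method*: 1 cell ]                 -> 16 bytes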
2051 public:
2052   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
2053     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
2054   }
2055 
2056   virtual bool is_SpeculativeTrapData() const { return true; }
2057 
2058   static int static_cell_count() {
2059     return speculative_trap_cell_count;
2060   }
2061 
2062   virtual int cell_count() const {
2063     return static_cell_count();
2064   }
2065 
2066   // Direct accessor
2067   Method* method() const {
2068     return (Method*)intptr_at(speculative_trap_method);


2141   Method* _method;
2142 
2143   // Size of this oop in bytes
2144   int _size;
2145 
2146   // Cached hint for bci_to_dp and bci_to_data
2147   int _hint_di;
2148 
2149   Mutex _extra_data_lock;
2150 
2151   MethodData(const methodHandle& method, int size, TRAPS);
2152 public:
2153   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2154   MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
2155 
2156   bool is_methodData() const volatile { return true; }
2157   void initialize();
2158 
2159   // Whole-method sticky bits and flags
2160   enum {
2161     _trap_hist_limit    = 24 JVMCI_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
2162     _trap_hist_mask     = max_jubyte,
2163     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2164   }; // Public flag values
2165 private:
2166   uint _nof_decompiles;             // count of all nmethod removals
2167   uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2168   uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2169   union {
2170     intptr_t _align;
2171     u1 _array[JVMCI_ONLY(2 *) _trap_hist_limit];
2172   } _trap_hist;
2173 
2174   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2175   intx              _eflags;          // flags on escape information
2176   intx              _arg_local;       // bit set of non-escaping arguments
2177   intx              _arg_stack;       // bit set of stack-allocatable arguments
2178   intx              _arg_returned;    // bit set of returned arguments
2179 
2180   int _creation_mileage;              // method mileage at MDO creation
2181 

