/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_CPCACHEOOP_HPP
#define SHARE_VM_OOPS_CPCACHEOOP_HPP

#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/constantTag.hpp"

class PSPromotionManager;

// The ConstantPoolCache is not a cache! It is the resolution table that the
// interpreter uses to avoid going into the runtime and a way to access resolved
// values.

// A ConstantPoolCacheEntry describes an individual entry of the constant
// pool cache. There are two principal kinds of entries: field entries for
// instance & static field access, and method entries for invokes. Some of
// the entry layout is shared and looks as follows:
//
// bit number |31                0|
// bit length |-8--|-8--|---16----|
// --------------------------------
// _indices   [ b2 | b1 |  index  ]  index = constant_pool_index
// _f1        [  entry specific   ]  metadata ptr (method or klass)
// _f2        [  entry specific   ]  vtable or res_ref index, or vfinal method ptr
// _flags     [tos|0|F=1|0|0|i|f|v|0|0000|field_index] (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1|-4--|----16-----]
// _flags     [tos|0|F=0|M|A|I|f|0|vf|indy_rf|000|00000000|psize] (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |   1   |-3-|----8---|--8--]
// --------------------------------
//
// with:
// index  = original constant pool index
// b1     = bytecode 1
// b2     = bytecode 2
// psize  = parameters size (method entries only)
// field_index = index into field information in holder InstanceKlass
//          The index max is 0xffff (max number of fields in constant pool)
//          and is multiplied by (InstanceKlass::next_offset) when accessing.
// tos    = TosState
// F      = the entry is for a field (or F=0 for a method)
// A      = call site has an appendix argument (loaded from resolved references)
// I      = interface call is forced virtual (must use a vtable index or vfinal)
// f      = field or method is final
// v      = field is volatile
// vf     = virtual but final (method entries only: is_vfinal())
// indy_rf = call site specifier method resolution failed
//
// The flags after TosState have the following interpretation:
// bit 26 (F): 1 for fields, 0 for methods
// i  flag true if the field is flattened (value type field)
// f  flag true if the field or method is marked final
// v  flag true if the field is volatile (only for fields)
// vf flag true if _f2 contains a method pointer (the call resolved to a final method)
// I  flag true if an invokeinterface call was forced virtual (method in class Object)
//
// Bits 31, 30, 29 and 28 together form a 4-bit number in the range 0 to 15 with
// the following mapping to the TosState states:
//
// btos: 0
// ztos: 1
// ctos: 2
// stos: 3
// itos: 4
// ltos: 5
// ftos: 6
// dtos: 7
// atos: 8
// qtos: 9
// vtos: 10
//
// Entry specific: field entries:
// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
// _f1      = field holder (a Klass*; _f1 holds metadata, not the java.lang.Class mirror)
// _f2      = field offset in bytes
// _flags   = field type information, original FieldInfo index in field holder
//            (field_index section)
//
// Entry specific: method entries:
// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
//            original constant pool index
// _f1      = Method* for non-virtual calls, unused by virtual calls.
//            For interface calls, which are essentially virtual but need a klass,
//            contains the Klass* of the corresponding interface.
//            For invokedynamic and invokehandle, f1 contains the adapter method which
//            manages the actual call. The appendix is stored in the ConstantPool
//            resolved_references array.
//            (upcoming metadata changes will move the appendix to a separate array)
// _f2      = vtable/itable index (or final Method*) for virtual calls only,
//            unused by non-virtual calls.  The is_vfinal flag indicates this is a
//            method pointer for a final method, not an index.
// _flags   = method type info (tos section),
//            virtual final bit (vfinal),
//            parameter size (psize section)
//
// Note: invokevirtual & invokespecial bytecodes can share the same constant
//       pool entry and thus the same constant pool cache entry. All invoke
//       bytecodes but invokevirtual use only _f1 and the corresponding b1
//       bytecode, while invokevirtual uses only _f2 and the corresponding
//       b2 bytecode.  The value of _flags is shared by both kinds of entries.
//
// The fields are volatile so that they are stored in the order written in the
// source code.  The _indices field with the bytecode must be written last.
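//
// For illustration only (a hedged sketch, not part of the implementation):
// using the flag constants defined in ConstantPoolCacheEntry below, a
// field-entry _flags word could be decoded along these lines, where 'flags'
// is a hypothetical local holding the raw value:
//
//   TosState tos  = (TosState)((flags >> tos_state_shift) & tos_state_mask); // bits 31..28
//   bool is_field = (flags & (1 << is_field_entry_shift)) != 0;              // F bit (26)
//   bool is_final = (flags & (1 << is_final_shift))       != 0;              // f bit (22)
//   bool is_vol   = (flags & (1 << is_volatile_shift))    != 0;              // v bit (21)
//   int  fidx     = flags & field_index_mask;                                // low 16 bits
//
// This mirrors the accessors flag_state(), is_field_entry(), is_final(),
// is_volatile() and field_index() declared further down.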

class CallInfo;

class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  friend class constantPoolCacheKlass;
  friend class ConstantPool;
  friend class InterpreterRuntime;

 private:
  volatile intx      _indices;  // constant pool index & rewrite bytecodes
  Metadata* volatile _f1;       // entry specific metadata field
  volatile intx      _f2;       // entry specific int/metadata field
  volatile intx      _flags;    // flags

  void set_bytecode_1(Bytecodes::Code code);
  void set_bytecode_2(Bytecodes::Code code);
  void set_f1(Metadata* f1) {
    Metadata* existing_f1 = _f1; // read once
    assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
    _f1 = f1;
  }
  void release_set_f1(Metadata* f1);
  void set_f2(intx f2) {
    intx existing_f2 = _f2; // read once
    assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
    _f2 = f2;
  }
  void set_f2_as_vfinal_method(Method* f2) {
    assert(is_vfinal(), "flags must be set");
    set_f2((intx)f2);
  }
  int make_flags(TosState state, int option_bits, int field_index_or_method_params);
  void set_flags(intx flags)                     { _flags = flags; }
  void set_field_flags(TosState field_type, int option_bits, int field_index) {
    assert((field_index & field_index_mask) == field_index, "field_index in range");
    set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
  }
  void set_method_flags(TosState return_type, int option_bits, int method_params) {
    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
    set_flags(make_flags(return_type, option_bits, method_params));
  }

 public:
  // specific bit definitions for the flags field:
  // (Note: the interpreter must use these definitions to access the CP cache.)
  enum {
    // high order bits are the TosState corresponding to field type or method return type
    tos_state_bits             = 4,
    tos_state_mask             = right_n_bits(tos_state_bits),
    tos_state_shift            = BitsPerInt - tos_state_bits,  // see verify_tos_state_shift below
    // misc. option bits; can be any bit position in [16..27]
    is_field_entry_shift       = 26,  // (F) is it a field or a method?
    has_method_type_shift      = 25,  // (M) does the call site have a MethodType?
    has_appendix_shift         = 24,  // (A) does the call site have an appendix argument?
    is_forced_virtual_shift    = 23,  // (I) is the interface reference forced to virtual mode?
    is_flatten_field           = 23,  // (i) is the value field flattened?
    is_final_shift             = 22,  // (f) is the field or method final?
    is_volatile_shift          = 21,  // (v) is the field volatile?
    is_vfinal_shift            = 20,  // (vf) did the call resolve to a final method?
    indy_resolution_failed_shift= 19, // (indy_rf) did call site specifier resolution fail?
    // low order bits give field index (for FieldInfo) or method parameter size:
    field_index_bits           = 16,
    field_index_mask           = right_n_bits(field_index_bits),
    parameter_size_bits        = 8,  // subset of field_index_mask, range is 0..255
    parameter_size_mask        = right_n_bits(parameter_size_bits),
    option_bits_mask           = ~(((~0u) << tos_state_shift) | (field_index_mask | parameter_size_mask))
  };
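
  // For illustration only: a sketch of how these bits compose, consistent with
  // the definitions above (the authoritative make_flags() lives in cpCache.cpp):
  //
  //   int flags = ((int)state << tos_state_shift)  // TosState into bits 31..28
  //             | option_bits                      // e.g. (1 << is_final_shift)
  //             | field_index_or_method_params;    // low bits: field index or psize
  //
  // set_field_flags() additionally ors in (1 << is_field_entry_shift) so that
  // is_field_entry() reports true for field entries.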

  // specific bit definitions for the indices field:
  enum {
    cp_index_bits              = 2*BitsPerByte,
    cp_index_mask              = right_n_bits(cp_index_bits),
    bytecode_1_shift           = cp_index_bits,
    bytecode_1_mask            = right_n_bits(BitsPerByte), // == (u1)0xFF
    bytecode_2_shift           = cp_index_bits + BitsPerByte,
    bytecode_2_mask            = right_n_bits(BitsPerByte)  // == (u1)0xFF
  };
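
  // For illustration only: a sketch of how _indices is unpacked, matching the
  // constants above and the accessors bytecode_1(), bytecode_2() and
  // constant_pool_index() below ('indices' stands for a loaded _indices value):
  //
  //   int cp_index = indices & cp_index_mask;                    // bits 15..0
  //   int b1 = (indices >> bytecode_1_shift) & bytecode_1_mask;  // bits 23..16
  //   int b2 = (indices >> bytecode_2_shift) & bytecode_2_mask;  // bits 31..24
  //
  // The bytecodes are written last (see the note above this class), so a
  // non-zero bytecode tells the interpreter that the entry is resolved.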

  // Initialization
  void initialize_entry(int original_index);     // initialize primary entry
  void initialize_resolved_reference_index(int ref_index) {
    assert(_f2 == 0, "set once");  // note: ref_index might be zero also
    _f2 = ref_index;
  }

  void set_field(                                // sets entry to resolved field state
    Bytecodes::Code get_code,                    // the bytecode used for reading the field
    Bytecodes::Code put_code,                    // the bytecode used for writing the field
    Klass*          field_holder,                // the object/klass holding the field
    int             orig_field_index,            // the original field index in the field holder
    int             field_offset,                // the field offset in bytes in the field holder
    TosState        field_type,                  // the (machine) field type
    bool            is_final,                    // the field is final
    bool            is_volatile,                 // the field is volatile
    bool            is_flatten,                  // the field is flattened (value type field)
    Klass*          root_klass                   // needed by the GC to dirty the klass
  );

 private:
  void set_direct_or_vtable_call(
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    const methodHandle& method,                  // the method/prototype if any (NULL, otherwise)
    int             vtable_index,                // the vtable index if any, else negative
    bool            sender_is_interface
  );

 public:
  void set_direct_call(                          // sets entry to exact concrete method entry
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    const methodHandle& method,                  // the method to call
    bool            sender_is_interface
  );

  void set_vtable_call(                          // sets entry to vtable index
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    const methodHandle& method,                  // resolved method which declares the vtable index
    int             vtable_index                 // the vtable index
  );

  void set_itable_call(
    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
    Klass* referenced_klass,                     // the referenced klass in the InterfaceMethodref
    const methodHandle& method,                  // the resolved interface method
    int itable_index                             // index into itable for the method
  );

  void set_method_handle(
    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  void set_dynamic_call(
    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  // Common code for invokedynamic and MH invocations.

  // The "appendix" is an optional call-site-specific parameter which is
  // pushed by the JVM at the end of the argument list.  This argument may
  // be a MethodType for the MH.invokes and a CallSite for an invokedynamic
  // instruction.  However, its exact type and use depends on the Java upcall,
  // which simply returns a compiled LambdaForm along with any reference
  // that LambdaForm needs to complete the call.  If the upcall returns a
  // null appendix, the argument is not passed at all.
  //
  // The appendix is *not* represented in the signature of the symbolic
  // reference for the call site, but (if present) it *is* represented in
  // the Method* bound to the site.  This means that static and dynamic
  // resolution logic needs to make slightly different assessments about the
  // number and types of arguments.
  void set_method_handle_common(
    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
    Bytecodes::Code invoke_code,                 // _invokehandle or _invokedynamic
    const CallInfo &call_info                    // Call link information
  );

  // Return TRUE if resolution failed and this thread got to record the failure
  // status.  Return FALSE if another thread succeeded or failed in resolving
  // the method and recorded the success or failure before this thread had a
  // chance to record its failure.
  bool save_and_throw_indy_exc(const constantPoolHandle& cpool, int cpool_index,
                               int index, constantTag tag, TRAPS);

  // invokedynamic and invokehandle call sites have two entries in the
  // resolved references array:
  //   appendix   (at index+0)
  //   MethodType (at index+1)
  enum {
    _indy_resolved_references_appendix_offset    = 0,
    _indy_resolved_references_method_type_offset = 1,
    _indy_resolved_references_entries
  };
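
  // For illustration only (a hedged sketch; the actual lookups are done by the
  // accessors declared below): for an invokedynamic/invokehandle entry whose
  // resolved-references index is stored in _f2, the two associated oops would
  // be fetched roughly as
  //
  //   objArrayOop rr  = cpool->resolved_references();
  //   oop appendix    = rr->obj_at(f2_as_index() + _indy_resolved_references_appendix_offset);
  //   oop method_type = rr->obj_at(f2_as_index() + _indy_resolved_references_method_type_offset);
  //
  // guarded by has_appendix() and has_method_type() respectively.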

  Method*      method_if_resolved(const constantPoolHandle& cpool);
  oop        appendix_if_resolved(const constantPoolHandle& cpool);
  oop     method_type_if_resolved(const constantPoolHandle& cpool);

  void set_parameter_size(int value);

  // Which bytecode number (1 or 2) in the index field is valid for this bytecode?
  // Returns -1 if neither is valid.
  static int bytecode_number(Bytecodes::Code code) {
    switch (code) {
      case Bytecodes::_getstatic       :    // fall through
      case Bytecodes::_getfield        :    // fall through
      case Bytecodes::_invokespecial   :    // fall through
      case Bytecodes::_invokestatic    :    // fall through
      case Bytecodes::_invokehandle    :    // fall through
      case Bytecodes::_invokedynamic   :    // fall through
      case Bytecodes::_invokeinterface : return 1;
      case Bytecodes::_putstatic       :    // fall through
      case Bytecodes::_putfield        :    // fall through
      case Bytecodes::_vwithfield      :    // fall through
      case Bytecodes::_invokevirtual   : return 2;
      default                          : break;
    }
    return -1;
  }

  // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
  bool is_resolved(Bytecodes::Code code) const {
    switch (bytecode_number(code)) {
      case 1:  return (bytecode_1() == code);
      case 2:  return (bytecode_2() == code);
    }
    return false;      // default: not resolved
  }
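
  // For illustration only (a hedged sketch of a typical caller; 'cpool', 'code'
  // and 'cp_cache_index' are hypothetical locals, not part of this header):
  //
  //   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cp_cache_index);
  //   if (!e->is_resolved(code)) {
  //     // slow path: enter the runtime to resolve the field or method; this
  //     // fills in _f1/_f2/_flags and writes the bytecode into _indices last
  //   }
  //
  // Since bytecode_1()/bytecode_2() load _indices with acquire semantics and
  // the bytecode is written last, seeing the expected bytecode implies the
  // rest of the entry is visible.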

  // Accessors
  int indices() const                            { return _indices; }
  int indices_ord() const                        { return OrderAccess::load_acquire(&_indices); }
  int constant_pool_index() const                { return (indices() & cp_index_mask); }
  Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
  Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
  Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_acquire(&_f1); }
  Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
  Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
  // Use the accessor f1() to acquire _f1's value. This is needed for
  // example in BytecodeInterpreter::run(), where is_f1_null() is
  // called to check if an invokedynamic call is resolved. This load
  // of _f1 must be ordered with the loads performed by
  // cache->main_entry_index().
  bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
  int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
  Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
  Method*   f2_as_interface_method() const       { assert(bytecode_1() == Bytecodes::_invokeinterface, ""); return (Method*)_f2; }
  int       f2_as_offset() const                 { assert(is_field_entry(),  ""); return (int)_f2; }
  intx flags_ord() const                         { return (intx)OrderAccess::load_acquire(&_flags); }
  int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
  int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
  bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
  bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
  bool is_flatten() const                        { return (_flags & (1 << is_flatten_field))        != 0; }
  bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
  bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
  bool indy_resolution_failed() const            { intx flags = flags_ord(); return (flags & (1 << indy_resolution_failed_shift)) != 0; }
  bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
  bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
  bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
  bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }
  bool is_long() const                           { return flag_state() == ltos; }
  bool is_double() const                         { return flag_state() == dtos; }
  bool is_valuetype() const                      { return flag_state() == qtos; }
  TosState flag_state() const                    { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
                                                   return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }
  void set_indy_resolution_failed();

  // Code generation support
  static WordSize size()                         {
    return in_WordSize(align_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
  }
  static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
  static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
  static ByteSize f1_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
  static ByteSize f2_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
  static ByteSize flags_offset()                 { return byte_offset_of(ConstantPoolCacheEntry, _flags); }

#if INCLUDE_JVMTI
  // RedefineClasses() API support:
  // If this ConstantPoolCacheEntry refers to old_method then update it
  // to refer to new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  void adjust_method_entry(Method* old_method, Method* new_method,
         bool* trace_name_printed);
  bool check_no_old_or_obsolete_entries();
  Method* get_interesting_method_entry(Klass* k);
#endif // INCLUDE_JVMTI

  // Debugging & Printing
  void print (outputStream* st, int index) const;
  void verify(outputStream* st) const;

  static void verify_tos_state_shift() {
    // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
    assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
  }

  void verify_just_initialized(bool f2_used);
  void reinitialize(bool f2_used);
};


// A constant pool cache is a runtime data structure associated with a constant pool. The cache
// holds interpreter runtime information for all field access and invoke bytecodes. The cache
// is created and initialized before a class is actively used (i.e., initialized); the individual
// cache entries are filled in at resolution (i.e., "link") time (see also: rewriter.*).

class ConstantPoolCache: public MetaspaceObj {
  friend class VMStructs;
  friend class MetadataFactory;
 private:
  // If you add a new field that points to any metaspace object, you
  // must add this field to ConstantPoolCache::metaspace_pointers_do().
  int             _length;
  ConstantPool*   _constant_pool;          // the corresponding constant pool

  // The following fields need to be modified at runtime, so they cannot be
  // stored in the ConstantPool, which is read-only.
  // Array of resolved objects from the constant pool and map from resolved
  // object index to original constant pool index
  OopHandle            _resolved_references;
  Array<u2>*           _reference_map;
  // The narrowOop pointer to the archived resolved_references. Set at CDS dump
  // time when caching java heap objects is supported.
  CDS_JAVA_HEAP_ONLY(narrowOop _archived_references;)

  // Sizing
  debug_only(friend class ClassVerifier;)

  // Constructor
  ConstantPoolCache(int length,
                    const intStack& inverse_index_map,
                    const intStack& invokedynamic_inverse_index_map,
                    const intStack& invokedynamic_references_map) :
                          _length(length),
                          _constant_pool(NULL) {
    CDS_JAVA_HEAP_ONLY(_archived_references = 0;)
    initialize(inverse_index_map, invokedynamic_inverse_index_map,
               invokedynamic_references_map);
    for (int i = 0; i < length; i++) {
      assert(entry_at(i)->is_f1_null(), "Failed to clear?");
    }
  }

  // Initialization
  void initialize(const intArray& inverse_index_map,
                  const intArray& invokedynamic_inverse_index_map,
                  const intArray& invokedynamic_references_map);
 public:
  static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
                                     const intStack& cp_cache_map,
                                     const intStack& invokedynamic_cp_cache_map,
                                     const intStack& invokedynamic_references_map, TRAPS);
  bool is_constantPoolCache() const { return true; }

  int length() const                      { return _length; }
  void metaspace_pointers_do(MetaspaceClosure* it);
  MetaspaceObj::Type type() const         { return ConstantPoolCacheType; }

  oop  archived_references() NOT_CDS_JAVA_HEAP_RETURN_(NULL);
  void set_archived_references(oop o) NOT_CDS_JAVA_HEAP_RETURN;

  oop resolved_references()                 { return _resolved_references.resolve(); }
  void set_resolved_references(OopHandle s) { _resolved_references = s; }
  Array<u2>* reference_map() const        { return _reference_map; }
  void set_reference_map(Array<u2>* o)    { _reference_map = o; }

  // Assembly code support
  static int resolved_references_offset_in_bytes() { return offset_of(ConstantPoolCache, _resolved_references); }

  // CDS support
  void remove_unshareable_info();
  void verify_just_initialized();
 private:
  void walk_entries_for_initialization(bool check_only);
  void set_length(int length)                    { _length = length; }

  static int header_size()                       { return sizeof(ConstantPoolCache) / wordSize; }
  static int size(int length)                    { return align_metadata_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
 public:
  int size() const                               { return size(length()); }
 private:

  // Helpers
  ConstantPool**        constant_pool_addr()     { return &_constant_pool; }
  ConstantPoolCacheEntry* base() const           { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }

  friend class constantPoolCacheKlass;
  friend class ConstantPoolCacheEntry;

 public:
  // Accessors
  void set_constant_pool(ConstantPool* pool)   { _constant_pool = pool; }
  ConstantPool* constant_pool() const          { return _constant_pool; }
  // Fetches the entry at the given index.
  // The index must not be encoded or byte-swapped in any way.
  ConstantPoolCacheEntry* entry_at(int i) const {
    assert(0 <= i && i < length(), "index out of bounds");
    return base() + i;
  }

  // Code generation
  static ByteSize base_offset()                  { return in_ByteSize(sizeof(ConstantPoolCache)); }
  static ByteSize entry_offset(int raw_index) {
    int index = raw_index;
    return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
  }
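
  // For illustration only: the address arithmetic that generated code performs
  // to reach the i-th entry ('cache' and 'i' are hypothetical locals):
  //
  //   address entry = (address)cache + in_bytes(base_offset())
  //                 + i * in_bytes(ConstantPoolCacheEntry::size_in_bytes());
  //
  // which is what base() + i computes in entry_at(), and what entry_offset(i)
  // expresses as a ByteSize for the assembler.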

#if INCLUDE_JVMTI
  // RedefineClasses() API support:
  // If any entry of this ConstantPoolCache points to any of
  // old_methods, replace it with the corresponding new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  void adjust_method_entries(InstanceKlass* holder, bool* trace_name_printed);
  bool check_no_old_or_obsolete_entries();
  void dump_cache();
#endif // INCLUDE_JVMTI

  // RedefineClasses support
  DEBUG_ONLY(bool on_stack() { return false; })
  void deallocate_contents(ClassLoaderData* data);
  bool is_klass() const { return false; }

  // Printing
  void print_on(outputStream* st) const;
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{constant pool cache}"; }

  // Verify
  void verify_on(outputStream* st);
};

#endif // SHARE_VM_OOPS_CPCACHEOOP_HPP