< prev index next >

src/hotspot/share/oops/cpCache.hpp

Print this page




 119 //
 120 // Note: invokevirtual & invokespecial bytecodes can share the same constant
 121 //       pool entry and thus the same constant pool cache entry. All invoke
 122 //       bytecodes but invokevirtual use only _f1 and the corresponding b1
 123 //       bytecode, while invokevirtual uses only _f2 and the corresponding
 124 //       b2 bytecode.  The value of _flags is shared for both types of entries.
 125 //
 126 // The fields are volatile so that they are stored in the order written in the
 127 // source code.  The _indices field with the bytecode must be written last.
 128 
 129 class CallInfo;
 130 
 131 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
 132   friend class VMStructs;
 133   friend class constantPoolCacheKlass;
 134   friend class ConstantPool;
 135   friend class InterpreterRuntime;
 136 
 137  private:
       // The four words of a cache entry.  Per the class comment above they
       // are declared volatile so the compiler keeps the stores in source
       // order; _indices (which carries the resolved bytecodes) must be
       // written last.
 138   volatile intx     _indices;  // constant pool index & rewrite bytecodes
       // NOTE(review): 'volatile Metadata*' qualifies the pointee, not the
       // pointer; 'Metadata* volatile' would make the field itself volatile,
       // which is what the store-ordering comment above appears to require
       // -- confirm.
 139   volatile Metadata*   _f1;       // entry specific metadata field
 140   volatile intx        _f2;       // entry specific int/metadata field
 141   volatile intx     _flags;    // flags
 142 
 143 
       // Record the resolved bytecode in slot 1 / slot 2 (decoded by the
       // bytecode_1/bytecode_2 accessors below); defined out of line.
 144   void set_bytecode_1(Bytecodes::Code code);
 145   void set_bytecode_2(Bytecodes::Code code);
       // Plain store of the metadata word.  The assert enforces a write-once
       // contract: only NULL -> value (or a redundant store of the same
       // value) is a legal transition.
 146   void set_f1(Metadata* f1) {
 147     Metadata* existing_f1 = (Metadata*)_f1; // read once
 148     assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
 149     _f1 = f1;
 150   }
       // Release-ordered variant of set_f1; defined out of line.
 151   void release_set_f1(Metadata* f1);
       // Plain store of _f2; same write-once contract as set_f1, with 0 as
       // the "unset" value.
 152   void set_f2(intx f2) {
 153     intx existing_f2 = _f2; // read once
 154     assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
 155     _f2 = f2;
 156   }
       // Stores a Method* into _f2 for a vfinal call; the is_vfinal flag
       // must already be set so readers know how to interpret _f2.
 157   void set_f2_as_vfinal_method(Method* f2) {
 158     assert(is_vfinal(), "flags must be set");
 159     set_f2((intx)f2);
 160   }
 161   int make_flags(TosState state, int option_bits, int field_index_or_method_params);
 162   void set_flags(intx flags)                     { _flags = flags; }
 163   bool init_flags_atomic(intx flags);
       // Builds and stores the flags word for a field entry: the TosState,
       // the caller's option bits plus the is_field_entry bit, and the
       // field index (which the assert checks fits in field_index_mask).
 164   void set_field_flags(TosState field_type, int option_bits, int field_index) {
 165     assert((field_index & field_index_mask) == field_index, "field_index in range");
 166     set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
 167   }


 315       case Bytecodes::_invokeinterface : return 1;
 316       case Bytecodes::_putstatic       :    // fall through
 317       case Bytecodes::_putfield        :    // fall through
 318       case Bytecodes::_invokevirtual   : return 2;
 319       default                          : break;
 320     }
 321     return -1;
 322   }
 323 
 324   // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
       // bytecode_number(code) maps the bytecode to the slot (1 or 2) of
       // _indices it would be recorded in; the entry is resolved iff that
       // slot already holds this exact bytecode.  Bytecodes outside the
       // mapping yield -1 and fall through to "not resolved".
 325   bool is_resolved(Bytecodes::Code code) const {
 326     switch (bytecode_number(code)) {
 327       case 1:  return (bytecode_1() == code);
 328       case 2:  return (bytecode_2() == code);
 329     }
 330     return false;      // default: not resolved
 331   }
 332 
 333   // Accessors
       // NOTE(review): _indices is an intx but these return int -- on 64-bit
       // the value is narrowed; presumably the encoded bits fit in 32 bits,
       // confirm against the shift/mask constants.
 334   int indices() const                            { return _indices; }
       // Acquire load of _indices; presumably pairs with a release store
       // made when the resolved bytecode is published (set_bytecode_1/2) --
       // confirm in the out-of-line definitions.
 335   int indices_ord() const                        { return (intx)OrderAccess::load_ptr_acquire(&_indices); }
 336   int constant_pool_index() const                { return (indices() & cp_index_mask); }
       // Both bytecode slots are decoded from the acquire-ordered load, so a
       // reader that observes the bytecode also observes the entry words
       // written before it (see the class comment on store order).
 337   Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
 338   Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
       // Acquire load of _f1 (rationale in the comment before is_f1_null).
 339   Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); }
 340   Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
 341   Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
 342   // Use the accessor f1() to acquire _f1's value. This is needed for
 343   // example in BytecodeInterpreter::run(), where is_f1_null() is
 344   // called to check if an invokedynamic call is resolved. This load
 345   // of _f1 must be ordered with the loads performed by
 346   // cache->main_entry_index().
 347   bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
       // _f2 holds either an index or a Method*, disambiguated by is_vfinal.
 348   int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
 349   Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
 350   int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
 351   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
       // Single-bit queries on the _flags word.
 352   bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
 353   bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
 354   bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
 355   bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
       // These two are only meaningful once the entry is bound (f1 non-NULL),
       // hence the is_f1_null guard in front of the bit test.
 356   bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
 357   bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
 358   bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
 359   bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }




 119 //
 120 // Note: invokevirtual & invokespecial bytecodes can share the same constant
 121 //       pool entry and thus the same constant pool cache entry. All invoke
 122 //       bytecodes but invokevirtual use only _f1 and the corresponding b1
 123 //       bytecode, while invokevirtual uses only _f2 and the corresponding
 124 //       b2 bytecode.  The value of _flags is shared for both types of entries.
 125 //
 126 // The fields are volatile so that they are stored in the order written in the
 127 // source code.  The _indices field with the bytecode must be written last.
 128 
 129 class CallInfo;
 130 
 131 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
 132   friend class VMStructs;
 133   friend class constantPoolCacheKlass;
 134   friend class ConstantPool;
 135   friend class InterpreterRuntime;
 136 
 137  private:
       // The four words of a cache entry.  Per the class comment above they
       // are declared volatile so the compiler keeps the stores in source
       // order; _indices (which carries the resolved bytecodes) must be
       // written last.
 138   volatile intx     _indices;  // constant pool index & rewrite bytecodes
       // 'Metadata* volatile': here the pointer itself is the volatile
       // object, so loads/stores of the field (not the pointee) are the
       // ordered accesses -- matching the store-order comment above.
 139   Metadata* volatile   _f1;       // entry specific metadata field
 140   volatile intx        _f2;       // entry specific int/metadata field
 141   volatile intx     _flags;    // flags
 142 
 143 
       // Record the resolved bytecode in slot 1 / slot 2 (decoded by the
       // bytecode_1/bytecode_2 accessors below); defined out of line.
 144   void set_bytecode_1(Bytecodes::Code code);
 145   void set_bytecode_2(Bytecodes::Code code);
       // Plain store of the metadata word.  The assert enforces a write-once
       // contract: only NULL -> value (or a redundant store of the same
       // value) is a legal transition.
 146   void set_f1(Metadata* f1) {
 147     Metadata* existing_f1 = _f1; // read once
 148     assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
 149     _f1 = f1;
 150   }
       // Release-ordered variant of set_f1; defined out of line.
 151   void release_set_f1(Metadata* f1);
       // Plain store of _f2; same write-once contract as set_f1, with 0 as
       // the "unset" value.
 152   void set_f2(intx f2) {
 153     intx existing_f2 = _f2; // read once
 154     assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
 155     _f2 = f2;
 156   }
       // Stores a Method* into _f2 for a vfinal call; the is_vfinal flag
       // must already be set so readers know how to interpret _f2.
 157   void set_f2_as_vfinal_method(Method* f2) {
 158     assert(is_vfinal(), "flags must be set");
 159     set_f2((intx)f2);
 160   }
 161   int make_flags(TosState state, int option_bits, int field_index_or_method_params);
 162   void set_flags(intx flags)                     { _flags = flags; }
 163   bool init_flags_atomic(intx flags);
       // Builds and stores the flags word for a field entry: the TosState,
       // the caller's option bits plus the is_field_entry bit, and the
       // field index (which the assert checks fits in field_index_mask).
 164   void set_field_flags(TosState field_type, int option_bits, int field_index) {
 165     assert((field_index & field_index_mask) == field_index, "field_index in range");
 166     set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
 167   }


 315       case Bytecodes::_invokeinterface : return 1;
 316       case Bytecodes::_putstatic       :    // fall through
 317       case Bytecodes::_putfield        :    // fall through
 318       case Bytecodes::_invokevirtual   : return 2;
 319       default                          : break;
 320     }
 321     return -1;
 322   }
 323 
 324   // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
       // bytecode_number(code) maps the bytecode to the slot (1 or 2) of
       // _indices it would be recorded in; the entry is resolved iff that
       // slot already holds this exact bytecode.  Bytecodes outside the
       // mapping yield -1 and fall through to "not resolved".
 325   bool is_resolved(Bytecodes::Code code) const {
 326     switch (bytecode_number(code)) {
 327       case 1:  return (bytecode_1() == code);
 328       case 2:  return (bytecode_2() == code);
 329     }
 330     return false;      // default: not resolved
 331   }
 332 
 333   // Accessors
       // NOTE(review): _indices is an intx but these return int -- on 64-bit
       // the value is narrowed; presumably the encoded bits fit in 32 bits,
       // confirm against the shift/mask constants.
 334   int indices() const                            { return _indices; }
       // Acquire load of _indices; presumably pairs with a release store
       // made when the resolved bytecode is published (set_bytecode_1/2) --
       // confirm in the out-of-line definitions.
 335   int indices_ord() const                        { return OrderAccess::load_acquire(&_indices); }
 336   int constant_pool_index() const                { return (indices() & cp_index_mask); }
       // Both bytecode slots are decoded from the acquire-ordered load, so a
       // reader that observes the bytecode also observes the entry words
       // written before it (see the class comment on store order).
 337   Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
 338   Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
       // Acquire load of _f1 (rationale in the comment before is_f1_null).
 339   Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_acquire(&_f1); }
 340   Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
 341   Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
 342   // Use the accessor f1() to acquire _f1's value. This is needed for
 343   // example in BytecodeInterpreter::run(), where is_f1_null() is
 344   // called to check if an invokedynamic call is resolved. This load
 345   // of _f1 must be ordered with the loads performed by
 346   // cache->main_entry_index().
 347   bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
       // _f2 holds either an index or a Method*, disambiguated by is_vfinal.
 348   int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
 349   Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
 350   int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
 351   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
       // Single-bit queries on the _flags word.
 352   bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
 353   bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
 354   bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
 355   bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
       // These two are only meaningful once the entry is bound (f1 non-NULL),
       // hence the is_f1_null guard in front of the bit test.
 356   bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
 357   bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
 358   bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
 359   bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }


< prev index next >