
src/hotspot/share/oops/valueKlass.hpp

Old version:
 public:
  // Type testing
  bool is_value_slow() const        { return true; }

  // Casting from Klass*
  static ValueKlass* cast(Klass* k) {
    assert(k->is_value(), "cast to ValueKlass");
    return (ValueKlass*) k;
  }

  // Use this to return the size of an instance in heap words
  // Implementation is currently simple because all value types are allocated
  // in the Java heap like Java objects.
  virtual int size_helper() const {
    return layout_helper_to_size_helper(layout_helper());
  }

  // allocate_instance() allocates a stand-alone value in the Java heap
  instanceOop allocate_instance(TRAPS);
  // allocate_buffered_or_heap_instance() tries to allocate a value in the
  // thread-local value buffer; if that allocation fails, it allocates in the
  // Java heap
  instanceOop allocate_buffered_or_heap_instance(bool* in_heap, TRAPS);
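A minimal caller sketch for the buffered-allocation API (hypothetical; the receiver vk and the surrounding TRAPS context are assumed, not taken from the source):

  // Hypothetical caller sketch: prefer the thread-local value buffer and
  // fall back to the Java heap; in_heap reports which path was taken.
  bool in_heap = false;
  instanceOop v = vk->allocate_buffered_or_heap_instance(&in_heap, CHECK_NULL);
  if (!in_heap) {
    // v lives in the thread-local buffer: it must be copied to the heap
    // before it can escape this thread.
  }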
  // minimum number of bytes occupied by nonstatic fields, HeapWord-aligned or a power of two
  int raw_value_byte_size() const;

  int first_field_offset() const;

  address data_for_oop(oop o) const {
    return ((address) (void*) o) + first_field_offset();
  }

  oop oop_for_data(address data) const {
    oop o = (oop) (data - first_field_offset());
    assert(oopDesc::is_oop(o, false), "Not an oop");
    return o;
  }
  void set_if_bufferable() {
    bool bufferable;

    int size_in_heap_words = size_helper();
    int base_offset = instanceOopDesc::base_offset_in_bytes();
    size_t size_in_bytes = size_in_heap_words * HeapWordSize - base_offset;
    bufferable = size_in_bytes <= BigValueTypeThreshold;
    if (size_in_bytes > VTBufferChunk::max_alloc_size()) bufferable = false;
    if (ValueTypesBufferMaxMemory == 0) bufferable = false;

    if (bufferable) {
      _extra_flags |= _extra_is_bufferable;
    } else {
      _extra_flags &= ~_extra_is_bufferable;
    }
  }
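As a worked example with assumed numbers (not taken from the source): on a 64-bit VM with HeapWordSize of 8 and a 16-byte object header (base_offset_in_bytes() == 16), a value whose size_helper() is 4 heap words has size_in_bytes = 4 * 8 - 16 = 16; it is marked bufferable only if 16 <= BigValueTypeThreshold, 16 does not exceed VTBufferChunk::max_alloc_size(), and ValueTypesBufferMaxMemory is nonzero.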
  bool is_bufferable() const          {
    return (_extra_flags & _extra_is_bufferable) != 0;
  }

  // Query if the hardware provides atomic load/store
  bool is_atomic();

  bool flatten_array();

  bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
  int nonstatic_oop_count();

  // Prototype general store methods...

  // copy the fields, with no concern for GC barriers
  void raw_field_copy(void* src, void* dst, size_t raw_byte_size);

  void value_store(void* src, void* dst, bool dst_is_heap, bool dst_uninitialized) {
    value_store(src, dst, nonstatic_field_size() << LogBytesPerHeapOop, dst_is_heap, dst_uninitialized);
  }

  // store the value of this klass contained within src into dst, given raw data pointers
  void value_store(void* src, void* dst, size_t raw_byte_size, bool dst_is_heap, bool dst_uninitialized);


  void iterate_over_inside_oops(OopClosure* f, oop value);

  // oop-iterate over a raw value type data pointer (oop_addr may not be an oop itself,
  // but backing storage or an array element)
  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);

  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi);
  // calling convention support
  void initialize_calling_convention();
  Array<SigEntry>* extended_sig() const {
    return *((Array<SigEntry>**)adr_extended_sig());
  }
  Array<VMRegPair>* return_regs() const {
    return *((Array<VMRegPair>**)adr_return_regs());
  }
  bool can_be_returned_as_fields() const;
  void save_oop_fields(const RegisterMap& map, GrowableArray<Handle>& handles) const;
  void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
  oop realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, bool buffered, TRAPS);
  static ValueKlass* returned_value_klass(const RegisterMap& reg_map);

  // pack and unpack handlers. They need to be loadable from generated code,
  // so they live at a fixed offset from the base of the klass pointer.
  static ByteSize pack_handler_offset() {
    fatal("Should be re-implemented using the ValueKlassStaticBlock indirection");
    return in_ByteSize(InstanceKlass::header_size() * wordSize);
  }

  static ByteSize unpack_handler_offset() {
    fatal("Should be re-implemented using the ValueKlassStaticBlock indirection");
    return in_ByteSize((InstanceKlass::header_size()+1) * wordSize);
  }

  static ByteSize default_value_offset_offset() {
    return byte_offset_of(ValueKlassFixedBlock, _default_value_offset);
  }

  void set_default_value_offset(int offset) {
    *((int*)adr_default_value_offset()) = offset;
New version:
 public:
  // Type testing
  bool is_value_slow() const        { return true; }

  // Casting from Klass*
  static ValueKlass* cast(Klass* k) {
    assert(k->is_value(), "cast to ValueKlass");
    return (ValueKlass*) k;
  }

  // Use this to return the size of an instance in heap words
  // Implementation is currently simple because all value types are allocated
  // in the Java heap like Java objects.
  virtual int size_helper() const {
    return layout_helper_to_size_helper(layout_helper());
  }
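A minimal usage sketch for the cast and sizing helpers (the Klass* variable k is assumed context):

  // Hypothetical sketch: downcast a Klass* known to describe a value type,
  // then compute the heap footprint of one stand-alone instance in bytes.
  ValueKlass* vk = ValueKlass::cast(k);            // asserts k->is_value()
  size_t bytes = vk->size_helper() * HeapWordSize;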
  // allocate_instance() allocates a stand-alone value in the Java heap
  instanceOop allocate_instance(TRAPS);
  // minimum number of bytes occupied by nonstatic fields, HeapWord-aligned or a power of two
  int raw_value_byte_size() const;

  int first_field_offset() const;

  address data_for_oop(oop o) const {
    return ((address) (void*) o) + first_field_offset();
  }

  oop oop_for_data(address data) const {
    oop o = (oop) (data - first_field_offset());
    assert(oopDesc::is_oop(o, false), "Not an oop");
    return o;
  }
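A hedged sketch of how the two accessors invert each other (the value oop v is assumed context):

  // Hypothetical round-trip sketch: data_for_oop() steps over the object
  // header to the first flattened field; oop_for_data() undoes the step.
  address payload = vk->data_for_oop(v);   // v + first_field_offset()
  oop back = vk->oop_for_data(payload);    // asserts the result is an oop
  assert(back == v, "data_for_oop/oop_for_data must round-trip");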
  // Query if the hardware provides atomic load/store
  bool is_atomic();

  bool flatten_array();

  bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
  int nonstatic_oop_count();

  // Prototype general store methods...

  // copy the fields, with no concern for GC barriers
  void raw_field_copy(void* src, void* dst, size_t raw_byte_size);

  void value_store(void* src, void* dst, bool dst_is_heap, bool dst_uninitialized) {
    value_store(src, dst, nonstatic_field_size() << LogBytesPerHeapOop, dst_is_heap, dst_uninitialized);
  }

  // store the value of this klass contained within src into dst, given raw data pointers
  void value_store(void* src, void* dst, size_t raw_byte_size, bool dst_is_heap, bool dst_uninitialized);
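A hedged caller sketch for value_store() (the helper name and parameters below are illustrative assumptions):

  // Hypothetical sketch: copy this klass's flattened field payload into a
  // heap destination. dst_is_heap selects GC-barriered copying; the short
  // overload derives the byte count from nonstatic_field_size().
  void store_value_payload(ValueKlass* vk, oop src_value, address dst) {
    vk->value_store(vk->data_for_oop(src_value), dst,
                    true /* dst_is_heap */, false /* dst_uninitialized */);
  }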


  void iterate_over_inside_oops(OopClosure* f, oop value);

  // oop-iterate over a raw value type data pointer (oop_addr may not be an oop itself,
  // but backing storage or an array element)
  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);

  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi);
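A hedged sketch of a collector-side caller (the helper name and the template argument are assumptions):

  // Hypothetical sketch: visit every oop field embedded in a flattened value
  // payload, e.g. a flattened array element that is not itself a heap object.
  void visit_flattened_oops(ValueKlass* vk, address payload, OopClosure* cl) {
    if (vk->contains_oops()) {
      vk->oop_iterate_specialized<oop>(payload, cl);
    }
  }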
  // calling convention support
  void initialize_calling_convention();
  Array<SigEntry>* extended_sig() const {
    return *((Array<SigEntry>**)adr_extended_sig());
  }
  Array<VMRegPair>* return_regs() const {
    return *((Array<VMRegPair>**)adr_return_regs());
  }
  bool can_be_returned_as_fields() const;
  void save_oop_fields(const RegisterMap& map, GrowableArray<Handle>& handles) const;
  void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
  oop realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS);
  static ValueKlass* returned_value_klass(const RegisterMap& reg_map);
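A hedged sketch of the intended save/restore pairing, inferred only from the method names (the safepoint in the middle is an assumption):

  // Hypothetical sketch: when a value is returned as fields in registers, its
  // oop fields are handleized before a possible GC and written back after.
  GrowableArray<Handle> handles;
  vk->save_oop_fields(reg_map, handles);      // capture oop fields via handles
  // ... operations that may safepoint and move objects ...
  vk->restore_oop_results(reg_map, handles);  // write back possibly-moved oops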
  // pack and unpack handlers. They need to be loadable from generated code,
  // so they live at a fixed offset from the base of the klass pointer.
  static ByteSize pack_handler_offset() {
    fatal("Should be re-implemented using the ValueKlassStaticBlock indirection");
    return in_ByteSize(InstanceKlass::header_size() * wordSize);
  }

  static ByteSize unpack_handler_offset() {
    fatal("Should be re-implemented using the ValueKlassStaticBlock indirection");
    return in_ByteSize((InstanceKlass::header_size()+1) * wordSize);
  }
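A hedged sketch of the fixed-offset load these accessors enable (raw pointer arithmetic shown for illustration, not the VM's actual generated code):

  // Hypothetical sketch: generated code can reach the handler slot with one
  // constant offset from the klass base, then load the stored entry point.
  address slot    = (address)vk + in_bytes(ValueKlass::pack_handler_offset());
  address handler = *(address*)slot;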
  static ByteSize default_value_offset_offset() {
    return byte_offset_of(ValueKlassFixedBlock, _default_value_offset);
  }

  void set_default_value_offset(int offset) {
    *((int*)adr_default_value_offset()) = offset;

