< prev index next >

src/share/vm/oops/valueKlass.hpp

Print this page




  52   // Returns the array class with this class as element type
  53   Klass* array_klass_impl(bool or_null, TRAPS);
  54 
  55  public:
  56   // Type testing
  // A ValueKlass always identifies itself as a value type; this overrides
  // the generic (slow-path) Klass query.
  57   bool is_value_slow() const        { return true; }
  58 
  59   // Casting from Klass*
  60   static ValueKlass* cast(Klass* k) {
  61     assert(k->is_value(), "cast to ValueKlass");
  62     return (ValueKlass*) k;
  63   }
  64 
  65   // Use this to return the size of an instance in heap words
  66   // Implementation is currently simple because all value types are allocated
  67   // in Java heap like Java objects.
  // The size is decoded from this klass's layout_helper, exactly as for
  // regular instance klasses.
  68   virtual int size_helper() const {
  69     return layout_helper_to_size_helper(layout_helper());
  70   }
  71 







  72   // minimum number of bytes occupied by nonstatic fields, HeapWord aligned or pow2
  73   int raw_value_byte_size() const;
  74 
  // Byte offset from the start of an instance to its first nonstatic field,
  // i.e. the amount skipped/stepped back by data_for_oop()/oop_for_data().
  75   int first_field_offset() const;
  76 
  77   address data_for_oop(oop o) const {
  78     return ((address) (void*) o) + first_field_offset();
  79   }
  80 
  // Inverse of data_for_oop(): given a pointer to a value's field payload,
  // step back over the object header to recover the enclosing oop.
  81    oop oop_for_data(address data) const {
  82     oop o = (oop) (data - first_field_offset());
  // Sanity check only. NOTE(review): the 'false' argument presumably relaxes
  // part of the oop verification -- confirm against oopDesc::is_oop.
  83     assert(o->is_oop(false), "Not an oop");
  84     return o;
  85   }
  86 





















  87   // Query if h/w provides atomic load/store
  // (presumably: whether a value of this size/alignment can be copied
  // without tearing -- confirm in valueKlass.cpp)
  88   bool is_atomic();
  89 
  // Whether arrays of this value type use a flattened element layout.
  // NOTE(review): inferred from the name; verify against the definition.
  90   bool flatten_array();
  91 
  // True iff this value type has at least one nonstatic oop field
  // (i.e. at least one oop map entry).
  92   bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
  93   int nonstatic_oop_count();
  94 
  95   // Prototype general store methods...
  96 
  97   // copy the fields, with no concern for GC barriers
  98   void raw_field_copy(void* src, void* dst, size_t raw_byte_size);
  // Convenience overload: derives the raw byte size by shifting
  // nonstatic_field_size() by LogBytesPerHeapOop and delegates to the
  // explicit-size value_store() below. NOTE(review): this assumes
  // nonstatic_field_size() is expressed in heap-oop-sized units -- confirm.
 100   void value_store(void* src, void* dst, bool dst_is_heap, bool dst_uninitialized) {
 101     value_store(src, dst, nonstatic_field_size() << LogBytesPerHeapOop, dst_is_heap, dst_uninitialized);
 102   }
 103 
 104   // store the value of this klass contained with src into dst, raw data ptr
 105   void value_store(void* src, void* dst, size_t raw_byte_size, bool dst_is_heap, bool dst_uninitialized);
 106 
 107 
 108   oop derive_value_type_copy(Handle src, InstanceKlass* target_klass, TRAPS);
 109 
 110   // GC support...
 111 
 112   // oop iterate raw value type data pointer (where oop_addr may not be an oop, but backing/array-element)
 113   template <bool nv, typename T, class OopClosureType>
 114   inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);
 115 
 // Bounded variant: NOTE(review): lo/hi presumably restrict iteration to oop
 // fields whose addresses fall within [lo, hi) -- confirm in the .inline.hpp.
 116   template <bool nv, typename T, class OopClosureType>
 117   inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi);
 118 
 119   // calling convention support
 120   GrowableArray<SigEntry> collect_fields(int base_off = 0) const;
 121   GrowableArray<SigEntry> return_convention(VMRegPair*& regs, int& nb_fields) const;
 122   void save_oop_fields(const GrowableArray<SigEntry>& sig_vk, RegisterMap& map, const VMRegPair* regs, GrowableArray<Handle>& handles, int nb_fields) const;
 123   bool save_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
 124   void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
 125   oop realloc_result(const GrowableArray<SigEntry>& sig_vk, const RegisterMap& reg_map, const VMRegPair* regs, const GrowableArray<Handle>& handles, int nb_fields, TRAPS);
 126   static ValueKlass* returned_value_type(const RegisterMap& reg_map);
 127 };
 128 


  52   // Returns the array class with this class as element type
  53   Klass* array_klass_impl(bool or_null, TRAPS);
  54 
  55  public:
  56   // Type testing
  // A ValueKlass always identifies itself as a value type; this overrides
  // the generic (slow-path) Klass query.
  57   bool is_value_slow() const        { return true; }
  58 
  59   // Casting from Klass*
  // Checked downcast: asserts the klass is a value type first.
  60   static ValueKlass* cast(Klass* k) {
  61     assert(k->is_value(), "cast to ValueKlass");
  62     return (ValueKlass*) k;
  63   }
  64 
  65   // Use this to return the size of an instance in heap words
  66   // Implementation is currently simple because all value types are allocated
  67   // in Java heap like Java objects.
  // The size is decoded from this klass's layout_helper, exactly as for
  // regular instance klasses.
  68   virtual int size_helper() const {
  69     return layout_helper_to_size_helper(layout_helper());
  70   }
  71 
  72   // allocate_instance() allocates a stand alone value in the Java heap
  73   instanceOop allocate_instance(TRAPS);
  74   // allocate_buffered_or_heap_instance() tries to allocate a value in the
  75   // thread local value buffer, if allocation fails, it allocates it in the
  76   // Java heap
  // NOTE(review): *in_heap presumably reports which of the two locations the
  // allocation ended up in -- confirm at the definition.
  77   instanceOop allocate_buffered_or_heap_instance(bool* in_heap, TRAPS);
  78 
  79   // minimum number of bytes occupied by nonstatic fields, HeapWord aligned or pow2
  80   int raw_value_byte_size() const;
  81 
  // Byte offset from the start of an instance to its first nonstatic field,
  // i.e. the amount skipped/stepped back by data_for_oop()/oop_for_data().
  82   int first_field_offset() const;
  83 
  84   address data_for_oop(oop o) const {
  85     return ((address) (void*) o) + first_field_offset();
  86   }
  87 
  // Inverse of data_for_oop(): given a pointer to a value's field payload,
  // step back over the object header to recover the enclosing oop.
  88    oop oop_for_data(address data) const {
  89     oop o = (oop) (data - first_field_offset());
  // Sanity check only. NOTE(review): the 'false' argument presumably relaxes
  // part of the oop verification -- confirm against oopDesc::is_oop.
  90     assert(o->is_oop(false), "Not an oop");
  91     return o;
  92   }
  93 
  94    void set_if_bufferable() {
  95      bool bufferable;
  96      if (contains_oops()) {
  97        bufferable = false;
  98      } else {
  99        int size_in_heap_words = size_helper();
 100        int base_offset = instanceOopDesc::base_offset_in_bytes();
 101        size_t size_in_bytes = size_in_heap_words * HeapWordSize - base_offset;
 102        bufferable = size_in_bytes <= BigValueTypeThreshold;
 103      }
 104      if (bufferable) {
 105        _extra_flags |= _extra_is_bufferable;
 106      } else {
 107        _extra_flags &= ~_extra_is_bufferable;
 108      }
 109    }
 110 
  // Reads the cached flag bit maintained by set_if_bufferable().
 111   bool is_bufferable() const          {
 112     return (_extra_flags & _extra_is_bufferable) != 0;
 113   }
 114 
 115   // Query if h/w provides atomic load/store
 // (presumably: whether a value of this size/alignment can be copied
 // without tearing -- confirm in valueKlass.cpp)
 116   bool is_atomic();
 117 
 // Whether arrays of this value type use a flattened element layout.
 // NOTE(review): inferred from the name; verify against the definition.
 118   bool flatten_array();
 119 
 // True iff this value type has at least one nonstatic oop field
 // (i.e. at least one oop map entry).
 120   bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
 121   int nonstatic_oop_count();
 122 
 123   // Prototype general store methods...
 124 
 125   // copy the fields, with no concern for GC barriers
 126   void raw_field_copy(void* src, void* dst, size_t raw_byte_size);
 127 
 // Convenience overload: derives the raw byte size by shifting
 // nonstatic_field_size() by LogBytesPerHeapOop and delegates to the
 // explicit-size value_store() below. NOTE(review): this assumes
 // nonstatic_field_size() is expressed in heap-oop-sized units -- confirm.
 128   void value_store(void* src, void* dst, bool dst_is_heap, bool dst_uninitialized) {
 129     value_store(src, dst, nonstatic_field_size() << LogBytesPerHeapOop, dst_is_heap, dst_uninitialized);
 130   }
 131 
 132   // store the value of this klass contained with src into dst, raw data ptr
 133   void value_store(void* src, void* dst, size_t raw_byte_size, bool dst_is_heap, bool dst_uninitialized);
 134 
 135   oop unbox(Handle src, InstanceKlass* target_klass, TRAPS);
 136   oop box(Handle src, InstanceKlass* target_klass, TRAPS);
 137 
 138   // GC support...
 139 
 140   // oop iterate raw value type data pointer (where oop_addr may not be an oop, but backing/array-element)
 141   template <bool nv, typename T, class OopClosureType>
 142   inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);
 143 
 // Bounded variant: NOTE(review): lo/hi presumably restrict iteration to oop
 // fields whose addresses fall within [lo, hi) -- confirm in the .inline.hpp.
 144   template <bool nv, typename T, class OopClosureType>
 145   inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi);
 146 
 147   // calling convention support
 148   GrowableArray<SigEntry> collect_fields(int base_off = 0) const;
 149   GrowableArray<SigEntry> return_convention(VMRegPair*& regs, int& nb_fields) const;
 150   void save_oop_fields(const GrowableArray<SigEntry>& sig_vk, RegisterMap& map, const VMRegPair* regs, GrowableArray<Handle>& handles, int nb_fields) const;
 151   bool save_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
 152   void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
 153   oop realloc_result(const GrowableArray<SigEntry>& sig_vk, const RegisterMap& reg_map, const VMRegPair* regs, const GrowableArray<Handle>& handles, int nb_fields, TRAPS);
 154   static ValueKlass* returned_value_type(const RegisterMap& reg_map);
 155 };
 156 
< prev index next >