< prev index next >

src/hotspot/share/oops/valueKlass.cpp

Print this page




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/collectedHeap.inline.hpp"
  28 #include "gc/shared/gcLocker.inline.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 























// Returns the byte offset of the first field of a value instance under the
// old layout: the instanceOopDesc base offset rounded up to a long boundary.
int ValueKlass::first_field_offset_old() {
#ifdef ASSERT
  // Debug-only cross-check: scan every field and record the smallest offset
  // actually assigned by the field layout code.
  int first_offset = INT_MAX;
  for (AllFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset= fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_up(base_offset, BytesPerLong);
  // first_offset only exists in ASSERT builds; this assert compiles away otherwise.
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}
  62 
  63 int ValueKlass::raw_value_byte_size() {
  64   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  65   // If bigger than 64 bits or needs oop alignment, then use jlong aligned
  66   // which for values should be jlong aligned, asserts in raw_field_copy otherwise
  67   if (heapOopAlignedSize >= longSize || contains_oops()) {
  68     return heapOopAlignedSize;


 172   if (!vak->is_valueArray_klass()) {
 173     storage_props.clear_flattened();
 174   }
 175   if (or_null) {
 176     return vak->array_klass_or_null(storage_props, rank);
 177   }
 178   return vak->array_klass(storage_props, rank, THREAD);
 179 }
 180 
 181 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 182   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 183     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 184   }
 185   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 186 }
 187 
 188 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 189   InstanceKlass::array_klasses_do(f);
 190   if (get_value_array_klass() != NULL)
 191     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
 192 }
 193 
// Copy the raw field payload of one value of this klass from src to dst,
// using the widest copy unit the layout permits so fields are not torn
// (sheared) across copy boundaries where avoidable.
void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  if (!UseNewLayout) {
    /*
     * Try not to shear fields even if not an atomic store...
     *
     * First 3 cases handle value array store, otherwise works on the same basis
     * as JVM_Clone, at this size data is aligned. The order of primitive types
     * is largest to smallest, and it is not possible for fields to straddle long
     * copy boundaries.
     *
     * If MT without exclusive access, possible to observe partial value store,
     * but not partial primitive and reference field values
     */
    switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      // Anything larger must be a multiple of jlong; copy long-at-a-time.
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
    }
  } else {
    // New layout: copy in units no wider than this klass's alignment.
    // Each case copies as many units of its width as fit, advances src/dst,
    // then deliberately falls through to the next smaller width to copy the
    // remaining tail bytes.
    int size = this->get_exact_size_in_bytes();
    int length;
    switch (this->get_alignment()) {
    case BytesPerLong:
      length = size >> LogBytesPerLong;
      if (length > 0) {
        Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
        size -= length << LogBytesPerLong;
        src = (jlong*)src + length;
        dst = (jlong*)dst + length;
      }
      // Fallthrough
    case BytesPerInt:
      length = size >> LogBytesPerInt;
      if (length > 0) {
        Copy::conjoint_jints_atomic((jint*)src, (jint*)dst, length);
        size -= length << LogBytesPerInt;
        src = (jint*)src + length;
        dst = (jint*)dst + length;
      }
      // Fallthrough
    case BytesPerShort:
      length = size >> LogBytesPerShort;
      if (length > 0) {
        Copy::conjoint_jshorts_atomic((jshort*)src, (jshort*)dst, length);
        size -= length << LogBytesPerShort;
        src = (jshort*)src + length;
        dst = (jshort*)dst +length;
      }
      // Fallthrough
    case 1:
      // Byte-aligned remainder (or a fully byte-aligned value).
      if (size > 0) Copy::conjoint_jbytes_atomic((jbyte*)src, (jbyte*)dst, size);
      break;
    default:
      fatal("Unsupported alignment");
    }
  }
}
 260 
 261 /*
 262  * Store the value of this klass contained with src into dst.
 263  *
 264  * This operation is appropriate for use from vastore, vaload and putfield (for values)
 265  *
 266  * GC barriers currently can lock with no safepoint check and allocate c-heap,
  267  * so using raw pointers is "safe" for now.
 268  *
 269  * Going forward, look to use machine generated (stub gen or bc) version for most used klass layouts
 270  *
 271  */
// Store the value payload held in src into dst, issuing GC pre-barriers
// before and post-barriers after the raw copy when dst is a heap location
// and this klass contains oop fields. Non-heap (buffered) and oop-free
// destinations take the plain raw copy path with no barriers.
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops()) {
    if (dst_heap) {
      // src/dst aren't oops, need offset to adjust oop map offset
      const address dst_oop_addr = ((address) dst) - first_field_offset();

      ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

      // Pre-barriers...
      // Walk the nonstatic oop maps and announce each contiguous run of oop
      // fields (at its destination address) to the barrier set before the copy.
      OopMapBlock* map = start_of_nonstatic_oop_maps();
      OopMapBlock* const end = map + nonstatic_oop_map_count();
      while (map != end) {
        // Shame we can't just use the existing oop iterator...src/dst aren't oop
        address doop_address = dst_oop_addr + map->offset();
        // TEMP HACK: barrier code need to migrate to => access API (need own versions of value type ops)
        if (UseCompressedOops) {
          bs->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
        } else {
          bs->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
        }
        map++;
      }

      raw_field_copy(src, dst, raw_byte_size);

      // Post-barriers...
      // Second pass over the same oop maps, after the payload is in place.
      map = start_of_nonstatic_oop_maps();
      while (map != end) {
        address doop_address = dst_oop_addr + map->offset();
        bs->write_ref_array((HeapWord*) doop_address, map->count());
        map++;
      }
    } else { // Buffered value case
      raw_field_copy(src, dst, raw_byte_size);
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}
 311 
 312 // Value type arguments are not passed by reference, instead each
 313 // field of the value type is passed as an argument. This helper
 314 // function collects the fields of the value types (including embedded
 315 // value type's fields) in a list. Included with the field's type is
 316 // the offset of each field in the value type: i2c and c2i adapters
 317 // need that to load or store fields. Finally, the list of fields is
 318 // sorted in order of increasing offsets: the adapters and the
 319 // compiled code need to agree upon the order of fields.
 320 //
 321 // The list of basic types that is returned starts with a T_VALUETYPE
 322 // and ends with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as
 323 // delimiters. Every entry between the two is a field of the value
 324 // type. If there's an embedded value type in the list, it also starts
 325 // with a T_VALUETYPE and ends with a T_VOID. This is so we can
 326 // generate a unique fingerprint for the method's adapters and we can
 327 // generate the list of basic types from the interpreter point of view
 328 // (value types passed as reference: iterate on the list until a
 329 // T_VALUETYPE, drop everything until and including the closing




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/collectedHeap.inline.hpp"
  28 #include "gc/shared/gcLocker.inline.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.inline.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
// Constructor
// Initializes the value-klass-specific fixed block that follows the
// InstanceKlass layout: all calling-convention fields are zeroed here and
// filled in later (the pack/unpack handlers, signature and register arrays).
ValueKlass::ValueKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InstanceKlass::_misc_kind_value_type, InstanceKlass::ID) {
  _adr_valueklass_fixed_block = valueklass_static_block();
  // Addresses used for value type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = NULL;
  *((Array<VMRegPair>**)adr_return_regs()) = NULL;
  *((address*)adr_pack_handler()) = NULL;
  *((address*)adr_unpack_handler()) = NULL;
  assert(pack_handler() == NULL, "pack handler not null");
  *((int*)adr_default_value_offset()) = 0;
  *((Klass**)adr_value_array_klass()) = NULL;
  // Value instances get the always-locked mark word prototype
  // (NOTE(review): presumably so identity operations are disallowed — confirm).
  set_prototype_header(markWord::always_locked_prototype());
}
  63 
  64 oop ValueKlass::default_value() {
  65   oop val = java_mirror()->obj_field_acquire(default_value_offset());
  66   assert(oopDesc::is_oop(val), "Sanity check");
  67   assert(val->is_value(), "Sanity check");
  68   assert(val->klass() == this, "sanity check");
  69   return val;
  70 }
  71 
// Returns the byte offset of the first field of a value instance under the
// old layout: the instanceOopDesc base offset rounded up to a long boundary.
int ValueKlass::first_field_offset_old() {
#ifdef ASSERT
  // Debug-only cross-check: scan every field and record the smallest offset
  // actually assigned by the field layout code.
  int first_offset = INT_MAX;
  for (AllFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset= fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_up(base_offset, BytesPerLong);
  // first_offset only exists in ASSERT builds; this assert compiles away otherwise.
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}
  85 
  86 int ValueKlass::raw_value_byte_size() {
  87   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  88   // If bigger than 64 bits or needs oop alignment, then use jlong aligned
  89   // which for values should be jlong aligned, asserts in raw_field_copy otherwise
  90   if (heapOopAlignedSize >= longSize || contains_oops()) {
  91     return heapOopAlignedSize;


 195   if (!vak->is_valueArray_klass()) {
 196     storage_props.clear_flattened();
 197   }
 198   if (or_null) {
 199     return vak->array_klass_or_null(storage_props, rank);
 200   }
 201   return vak->array_klass(storage_props, rank, THREAD);
 202 }
 203 
 204 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 205   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 206     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 207   }
 208   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 209 }
 210 
 211 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 212   InstanceKlass::array_klasses_do(f);
 213   if (get_value_array_klass() != NULL)
 214     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);






















































































































 215 }
 216 
 217 // Value type arguments are not passed by reference, instead each
 218 // field of the value type is passed as an argument. This helper
 219 // function collects the fields of the value types (including embedded
 220 // value type's fields) in a list. Included with the field's type is
 221 // the offset of each field in the value type: i2c and c2i adapters
 222 // need that to load or store fields. Finally, the list of fields is
 223 // sorted in order of increasing offsets: the adapters and the
 224 // compiled code need to agree upon the order of fields.
 225 //
 226 // The list of basic types that is returned starts with a T_VALUETYPE
 227 // and ends with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as
 228 // delimiters. Every entry between the two is a field of the value
 229 // type. If there's an embedded value type in the list, it also starts
 230 // with a T_VALUETYPE and ends with a T_VOID. This is so we can
 231 // generate a unique fingerprint for the method's adapters and we can
 232 // generate the list of basic types from the interpreter point of view
 233 // (value types passed as reference: iterate on the list until a
 234 // T_VALUETYPE, drop everything until and including the closing


< prev index next >