< prev index next >

src/hotspot/share/oops/valueKlass.cpp

Print this page




  76         last_tsz = type2aelembytes(type);
  77       } else if (type == T_VALUETYPE) {
  78         // Not just primitives. Layout aligns embedded value, so use jlong aligned it is
  79         return heapOopAlignedSize;
  80       } else {
  81         guarantee(0, "Unknown type %d", type);
  82       }
  83       assert(last_tsz != 0, "Invariant");
  84       last_offset = fs.offset();
  85     }
  86   }
  87   // Assumes VT with no fields are meaningless and illegal
  88   last_offset += last_tsz;
  89   assert(last_offset > first_offset && last_tsz, "Invariant");
  90   return 1 << upper_log2(last_offset - first_offset);
  91 }
  92 
  93 instanceOop ValueKlass::allocate_instance(TRAPS) {
  94   int size = size_helper();  // Query before forming handle.
  95 
  96   return (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);


  97 }
  98 
  99 instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
 100   assert(THREAD->is_Java_thread(), "Only Java threads can call this method");
 101 
 102   instanceOop value = NULL;
 103   if (is_bufferable()) {
 104     value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
 105     *in_heap = false;
 106   }
 107   if (value == NULL) {
 108     log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
 109     value = allocate_instance(CHECK_NULL);
 110     *in_heap = true;
 111   }
 112   return value;
 113 }
 114 
 115 bool ValueKlass::is_atomic() {
 116   return (nonstatic_field_size() * heapOopSize) <= longSize;


 600     for (; map < end_map; map++) {
 601       oop* p = (oop*) (((char*)(oopDesc*)value) + map->offset());
 602       oop* const end = p + map->count();
 603       for (; p < end; ++p) {
 604         assert(oopDesc::is_oop_or_null(*p), "Sanity check");
 605         f->do_oop(p);
 606       }
 607     }
 608   } else {
 609     for (; map < end_map; map++) {
 610       narrowOop* p = (narrowOop*) (((char*)(oopDesc*)value) + map->offset());
 611       narrowOop* const end = p + map->count();
 612       for (; p < end; ++p) {
 613         oop o = oopDesc::decode_heap_oop(*p);
 614         assert(Universe::heap()->is_in_reserved_or_null(o), "Sanity check");
 615         assert(oopDesc::is_oop_or_null(o), "Sanity check");
 616         f->do_oop(p);
 617       }
 618     }
 619   }










 620 }


  76         last_tsz = type2aelembytes(type);
  77       } else if (type == T_VALUETYPE) {
  78         // Not just primitives. Layout aligns embedded value, so use jlong aligned it is
  79         return heapOopAlignedSize;
  80       } else {
  81         guarantee(0, "Unknown type %d", type);
  82       }
  83       assert(last_tsz != 0, "Invariant");
  84       last_offset = fs.offset();
  85     }
  86   }
  87   // Assumes VT with no fields are meaningless and illegal
  88   last_offset += last_tsz;
  89   assert(last_offset > first_offset && last_tsz, "Invariant");
  90   return 1 << upper_log2(last_offset - first_offset);
  91 }
  92 
  93 instanceOop ValueKlass::allocate_instance(TRAPS) {
  94   int size = size_helper();  // Query before forming handle.
  95 
  96   instanceOop oop = (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);
  97   assert(oop->mark()->is_always_locked(), "Unlocked value type");
  98   return oop;
  99 }
 100 
 101 instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
 102   assert(THREAD->is_Java_thread(), "Only Java threads can call this method");
 103 
 104   instanceOop value = NULL;
 105   if (is_bufferable()) {
 106     value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
 107     *in_heap = false;
 108   }
 109   if (value == NULL) {
 110     log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
 111     value = allocate_instance(CHECK_NULL);
 112     *in_heap = true;
 113   }
 114   return value;
 115 }
 116 
 117 bool ValueKlass::is_atomic() {
 118   return (nonstatic_field_size() * heapOopSize) <= longSize;


 602     for (; map < end_map; map++) {
 603       oop* p = (oop*) (((char*)(oopDesc*)value) + map->offset());
 604       oop* const end = p + map->count();
 605       for (; p < end; ++p) {
 606         assert(oopDesc::is_oop_or_null(*p), "Sanity check");
 607         f->do_oop(p);
 608       }
 609     }
 610   } else {
 611     for (; map < end_map; map++) {
 612       narrowOop* p = (narrowOop*) (((char*)(oopDesc*)value) + map->offset());
 613       narrowOop* const end = p + map->count();
 614       for (; p < end; ++p) {
 615         oop o = oopDesc::decode_heap_oop(*p);
 616         assert(Universe::heap()->is_in_reserved_or_null(o), "Sanity check");
 617         assert(oopDesc::is_oop_or_null(o), "Sanity check");
 618         f->do_oop(p);
 619       }
 620     }
 621   }
 622 }
 623 
// Klass-level verification: run the regular InstanceKlass checks, then
// additionally require that this klass's prototype mark word carries the
// always-locked bit pattern — the invariant every new instance's header
// is stamped from. (Presumably so value instances can never be used for
// monitor locking; confirm against the Valhalla value-types design notes.)
void ValueKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header()->is_always_locked(), "Prototype header is not always locked");
}
 628 
// Instance-level verification: after the regular InstanceKlass oop checks,
// require that this particular value-type oop's mark word still carries the
// always-locked pattern — the per-instance counterpart of the klass-level
// prototype-header check in ValueKlass::verify_on.
void ValueKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark()->is_always_locked(), "Header is not always locked");
}
< prev index next >