src/hotspot/share/oops/oop.inline.hpp

Print this page




  // Returns the raw address of this object's mark word, casting away
  // const. Bypasses the Access API entirely; callers are responsible
  // for any required memory ordering or GC barriers.
  54 markOop* oopDesc::mark_addr_raw() const {
  55   return (markOop*) &_mark;
  56 }
  57 
  // Stores the mark word through the Access API with an MO_VOLATILE
  // decoration, so the store goes through any GC-configured barriers
  // and is not subject to compiler reordering/elision.
  58 void oopDesc::set_mark(volatile markOop m) {
  59   HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  60 }
  61 
  // Plain store of the mark word directly into _mark: no Access API,
  // no barriers, no ordering guarantees. For contexts (e.g. GC-internal
  // code) where the decorated set_mark() above is unnecessary.
  62 void oopDesc::set_mark_raw(volatile markOop m) {
  63   _mark = m;
  64 }
  65 
  // Stores the mark word with release semantics (MO_RELEASE) via the
  // Access API: prior writes by this thread become visible before the
  // new mark value does.
  66 void oopDesc::release_set_mark(markOop m) {
  67   HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  68 }
  69 
  // Atomically replaces the mark word with new_mark iff it currently
  // equals old_mark, going through the Access API (default decorators).
  // Returns the mark value observed: equal to old_mark on success.
  70 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  71   return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
  72 }
  73 
  // Raw compare-and-swap on _mark via Atomic::cmpxchg, bypassing the
  // Access API. Returns the value that was in the mark word at the time
  // of the CAS: equal to old_mark iff the swap succeeded.
  74 markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark) {
  75   return Atomic::cmpxchg(new_mark, &_mark, old_mark);
  76 }
  77 
  // Initializes the mark word to this object's prototype mark (as
  // computed by markOopDesc::prototype_for_object — presumably accounts
  // for per-class mark state such as biasing; see markOop.hpp), using
  // the barrier-aware volatile store.
  78 void oopDesc::init_mark() {
  79   set_mark(markOopDesc::prototype_for_object(this));
  80 }
  81 
  // Same as init_mark(), but via the raw (barrier-free, unordered)
  // store; for use where Access decorations are not needed.
  82 void oopDesc::init_mark_raw() {
  83   set_mark_raw(markOopDesc::prototype_for_object(this));
  84 }
  85 
  // Returns this object's Klass. With compressed class pointers the
  // narrow value in the header is decoded (decode_klass_not_null:
  // header is assumed to hold a valid, non-null class); otherwise the
  // full pointer is read directly from the header union.
  86 Klass* oopDesc::klass() const {
  87   if (UseCompressedClassPointers) {
  88     return Klass::decode_klass_not_null(_metadata._compressed_klass);
  89   } else {
  90     return _metadata._klass;
  91   }
  92 }
  93 
  94 Klass* oopDesc::klass_or_null() const volatile {
  95   if (UseCompressedClassPointers) {


 322   // The extra heap check is needed since the obj might be locked, in which case the
 323   // mark would point to a stack location and have the sentinel bit cleared
 324   return mark_raw()->is_marked();
 325 }
 326 
 327 // Used by scavengers
 328 void oopDesc::forward_to(oop p) {
  // Installs a forwarding pointer to p in this object's mark word (raw,
  // unordered store — safe only in single-threaded/STW scavenge phases;
  // parallel scavengers must use cas_forward_to below). The target must
  // be properly aligned, inside the reserved heap, and neither end may
  // be a CDS archive object (archive objects are never forwarded).
 329   assert(check_obj_alignment(p),
 330          "forwarding to something not aligned");
 331   assert(Universe::heap()->is_in_reserved(p),
 332          "forwarding to something not in heap");
 333   assert(!MetaspaceShared::is_archive_object(oop(this)) &&
 334          !MetaspaceShared::is_archive_object(p),
 335          "forwarding archive object");
  // encode_pointer_as_mark() produces a mark word that both encodes p
  // and reads as "marked"; the round-trip assert guards the encoding.
 336   markOop m = markOopDesc::encode_pointer_as_mark(p);
 337   assert(m->decode_pointer() == p, "encoding must be reversable");
 338   set_mark_raw(m);
 339 }
 340 
 341 // Used by parallel scavengers
 342 bool oopDesc::cas_forward_to(oop p, markOop compare) {
  // Atomically installs a forwarding pointer to p, but only if the mark
  // word still equals 'compare'. Returns true iff this thread's CAS won
  // (i.e. this thread is the one that forwarded the object).
 343   assert(check_obj_alignment(p),
 344          "forwarding to something not aligned");
 345   assert(Universe::heap()->is_in_reserved(p),
 346          "forwarding to something not in heap");
 347   markOop m = markOopDesc::encode_pointer_as_mark(p);
 348   assert(m->decode_pointer() == p, "encoding must be reversable");
 349   return cas_set_mark_raw(m, compare) == compare;
 350 }
 351 
 352 oop oopDesc::forward_to_atomic(oop p) {
 353   markOop oldMark = mark_raw();
 354   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
 355   markOop curMark;
 356 
 357   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
 358   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
 359 
 360   while (!oldMark->is_marked()) {
 361     curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
 362     assert(is_forwarded(), "object should have been forwarded");
 363     if (curMark == oldMark) {
 364       return NULL;
 365     }
 366     // If the CAS was unsuccessful then curMark->is_marked()
 367     // should return true as another thread has CAS'd in another
 368     // forwarding pointer.
 369     oldMark = curMark;




  // Returns the raw address of this object's mark word, casting away
  // const. Bypasses the Access API entirely; callers are responsible
  // for any required memory ordering or GC barriers.
  54 markOop* oopDesc::mark_addr_raw() const {
  55   return (markOop*) &_mark;
  56 }
  57 
  // Stores the mark word through the Access API with an MO_VOLATILE
  // decoration, so the store goes through any GC-configured barriers
  // and is not subject to compiler reordering/elision.
  58 void oopDesc::set_mark(volatile markOop m) {
  59   HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  60 }
  61 
  // Plain store of the mark word directly into _mark: no Access API,
  // no barriers, no ordering guarantees. For contexts (e.g. GC-internal
  // code) where the decorated set_mark() above is unnecessary.
  62 void oopDesc::set_mark_raw(volatile markOop m) {
  63   _mark = m;
  64 }
  65 
  // Stores the mark word with release semantics (MO_RELEASE) via the
  // Access API: prior writes by this thread become visible before the
  // new mark value does.
  66 void oopDesc::release_set_mark(markOop m) {
  67   HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  68 }
  69 
  // Atomically replaces the mark word with new_mark iff it currently
  // equals old_mark, going through the Access API (default decorators).
  // Returns the mark value observed: equal to old_mark on success.
  70 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  71   return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
  72 }
  73 
  // Raw compare-and-swap on _mark via Atomic::cmpxchg, bypassing the
  // Access API. 'order' lets callers relax the CAS's memory ordering
  // (the default is declared elsewhere — presumably conservative;
  // TODO confirm against the header declaration). Returns the value
  // observed in the mark word: equal to old_mark iff the swap succeeded.
  74 markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
  75   return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
  76 }
  77 
  // Initializes the mark word to this object's prototype mark (as
  // computed by markOopDesc::prototype_for_object — presumably accounts
  // for per-class mark state such as biasing; see markOop.hpp), using
  // the barrier-aware volatile store.
  78 void oopDesc::init_mark() {
  79   set_mark(markOopDesc::prototype_for_object(this));
  80 }
  81 
  // Same as init_mark(), but via the raw (barrier-free, unordered)
  // store; for use where Access decorations are not needed.
  82 void oopDesc::init_mark_raw() {
  83   set_mark_raw(markOopDesc::prototype_for_object(this));
  84 }
  85 
  // Returns this object's Klass. With compressed class pointers the
  // narrow value in the header is decoded (decode_klass_not_null:
  // header is assumed to hold a valid, non-null class); otherwise the
  // full pointer is read directly from the header union.
  86 Klass* oopDesc::klass() const {
  87   if (UseCompressedClassPointers) {
  88     return Klass::decode_klass_not_null(_metadata._compressed_klass);
  89   } else {
  90     return _metadata._klass;
  91   }
  92 }
  93 
  94 Klass* oopDesc::klass_or_null() const volatile {
  95   if (UseCompressedClassPointers) {


 322   // The extra heap check is needed since the obj might be locked, in which case the
 323   // mark would point to a stack location and have the sentinel bit cleared
 324   return mark_raw()->is_marked();
 325 }
 326 
 327 // Used by scavengers
 328 void oopDesc::forward_to(oop p) {
  // Installs a forwarding pointer to p in this object's mark word (raw,
  // unordered store — safe only in single-threaded/STW scavenge phases;
  // parallel scavengers must use cas_forward_to below). The target must
  // be properly aligned, inside the reserved heap, and neither end may
  // be a CDS archive object (archive objects are never forwarded).
 329   assert(check_obj_alignment(p),
 330          "forwarding to something not aligned");
 331   assert(Universe::heap()->is_in_reserved(p),
 332          "forwarding to something not in heap");
 333   assert(!MetaspaceShared::is_archive_object(oop(this)) &&
 334          !MetaspaceShared::is_archive_object(p),
 335          "forwarding archive object");
  // encode_pointer_as_mark() produces a mark word that both encodes p
  // and reads as "marked"; the round-trip assert guards the encoding.
 336   markOop m = markOopDesc::encode_pointer_as_mark(p);
 337   assert(m->decode_pointer() == p, "encoding must be reversable");
 338   set_mark_raw(m);
 339 }
 340 
 341 // Used by parallel scavengers
 342 bool oopDesc::cas_forward_to(oop p, markOop compare, atomic_memory_order order) {
  // Atomically installs a forwarding pointer to p, but only if the mark
  // word still equals 'compare'; 'order' is forwarded to the underlying
  // CAS so callers can relax its memory ordering. Returns true iff this
  // thread's CAS won (i.e. this thread forwarded the object).
 343   assert(check_obj_alignment(p),
 344          "forwarding to something not aligned");
 345   assert(Universe::heap()->is_in_reserved(p),
 346          "forwarding to something not in heap");
 347   markOop m = markOopDesc::encode_pointer_as_mark(p);
 348   assert(m->decode_pointer() == p, "encoding must be reversable");
 349   return cas_set_mark_raw(m, compare, order) == compare;
 350 }
 351 
 352 oop oopDesc::forward_to_atomic(oop p) {
 353   markOop oldMark = mark_raw();
 354   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
 355   markOop curMark;
 356 
 357   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
 358   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
 359 
 360   while (!oldMark->is_marked()) {
 361     curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
 362     assert(is_forwarded(), "object should have been forwarded");
 363     if (curMark == oldMark) {
 364       return NULL;
 365     }
 366     // If the CAS was unsuccessful then curMark->is_marked()
 367     // should return true as another thread has CAS'd in another
 368     // forwarding pointer.
 369     oldMark = curMark;