
src/share/vm/oops/oop.inline.hpp

rev 12854 : [mq]: gcinterface.patch

@@ -49,32 +49,15 @@
 template <class T> inline void update_barrier_set_pre(T* p, oop v) {
   oopDesc::bs()->write_ref_field_pre(p, v);
 }
 
 template <class T> void oop_store(T* p, oop v) {
-  if (always_do_update_barrier) {
-    oop_store((volatile T*)p, v);
-  } else {
-    update_barrier_set_pre(p, v);
-    oopDesc::encode_store_heap_oop(p, v);
-    // always_do_update_barrier == false =>
-    // Either we are at a safepoint (in GC) or CMS is not used. In both
-    // cases it's unnecessary to mark the card as dirty with release sematics.
-    update_barrier_set((void*)p, v, false /* release */);  // cast away type
-  }
+  oopDesc::bs()->oop_store(p, v);
 }
 
 template <class T> void oop_store(volatile T* p, oop v) {
-  update_barrier_set_pre((T*)p, v);   // cast away volatile
-  // Used by release_obj_field_put, so use release_store_ptr.
-  oopDesc::release_encode_store_heap_oop(p, v);
-  // When using CMS we must mark the card corresponding to p as dirty
-  // with release sematics to prevent that CMS sees the dirty card but
-  // not the new value v at p due to reordering of the two
-  // stores. Note that CMS has a concurrent precleaning phase, where
-  // it reads the card table while the Java threads are running.
-  update_barrier_set((void*)p, v, true /* release */);    // cast away type
+  oopDesc::bs()->oop_store(p, v);
 }
 
 // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
 // (without having to remember the function name this calls).
 inline void oop_store_raw(HeapWord* addr, oop value) {

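Note on the hunk above: the patch collapses both oop_store() overloads into a single call through the new barrier-set entry point, so the pre-write barrier, the (release-)store and the post-write barrier all move behind oopDesc::bs(). BarrierSet::oop_store() itself is not shown in this file, so the following is only a hedged sketch of what that member plausibly looks like, reconstructed from the two removed bodies; the real declaration lives elsewhere in gcinterface.patch and may differ.

  // Sketch only -- reconstructed from the removed oop_store() bodies above;
  // the actual BarrierSet::oop_store() added by gcinterface.patch is not
  // visible in this file and may differ in signature and detail.
  template <class T>
  void BarrierSet::oop_store(T* p, oop v) {
    write_ref_field_pre(p, v);                          // pre-barrier (e.g. G1 SATB enqueue)
    oopDesc::encode_store_heap_oop(p, v);               // plain store of the (possibly narrow) oop
    write_ref_field((void*)p, v, false /* release */);  // post-barrier, no release ordering needed
  }

  template <class T>
  void BarrierSet::oop_store(volatile T* p, oop v) {
    write_ref_field_pre((T*)p, v);                      // cast away volatile for the pre-barrier
    oopDesc::release_encode_store_heap_oop(p, v);       // release store, as in release_obj_field_put
    write_ref_field((void*)p, v, true /* release */);   // release card mark for CMS precleaning
  }

One consequence is that the always_do_update_barrier special case disappears from the caller: whether a release store or a release card mark is needed becomes the barrier set's decision rather than this file's.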
@@ -272,11 +255,11 @@
       //     is_objArray() && is_forwarded()   // covers first scenario above
       //  || is_typeArray()                    // covers second scenario above
       // If and when UseParallelGC uses the same obj array oop stealing/chunking
       // technique, we will need to suitably modify the assertion.
       assert((s == klass->oop_size(this)) ||
-             (Universe::heap()->is_gc_active() &&
+             (GC::gc()->heap()->is_gc_active() &&
               ((is_typeArray() && UseConcMarkSweepGC) ||
                (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
              "wrong array object size");
     } else {
       // Must be zero, so bite the bullet and take the virtual call.

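From this hunk on, the mechanical part of the patch is replacing Universe::heap() with GC::gc()->heap() in asserts and queries. The GC class itself is introduced elsewhere in gcinterface.patch; the following is only a rough, assumed sketch of the accessor pattern these call sites rely on (names other than GC::gc() and heap() are guesses, not taken from the patch).

  // Hypothetical sketch of the GC singleton assumed by the call sites above;
  // the real class is defined elsewhere in gcinterface.patch and will differ.
  class GC : public CHeapObj<mtGC> {
    static GC* _gc;                        // installed during heap initialization
  public:
    static GC* gc() {
      assert(_gc != NULL, "GC interface not yet initialized");
      return _gc;
    }
    virtual CollectedHeap* heap() = 0;     // each concrete collector returns its heap
  };

The point of the indirection is that shared runtime code such as oop.inline.hpp asks the active collector for its heap instead of reaching through Universe, which is what lets the GC implementation sit behind a single pluggable interface.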
@@ -338,11 +321,11 @@
 }
 
 narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
   assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+  assert(GC::gc()->heap()->is_in_reserved(v), "Address not in heap");
   address base = Universe::narrow_oop_base();
   int    shift = Universe::narrow_oop_shift();
   uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
   uint64_t result = pd >> shift;

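The compressed-oop arithmetic in encode_heap_oop_not_null() is untouched by the patch: the narrow oop is the object's offset from the narrow-oop base, shifted right by the object-alignment shift. A purely illustrative worked example (the numbers are made up, not taken from the patch):

  // Illustrative only: with base = 0x0000000800000000 and shift = 3
  // (8-byte object alignment), an oop at 0x0000000800001040 encodes as
  //   pd     = 0x800001040 - 0x800000000 = 0x1040
  //   narrow = 0x1040 >> 3               = 0x208
  // and decoding reverses it, as decode_heap_oop_not_null does:
  //   oop    = (0x208 << 3) + 0x800000000 = 0x800001040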
@@ -543,13 +526,13 @@
 
 // used only for asserts
 bool oopDesc::is_oop(bool ignore_mark_word) const {
   oop obj = (oop) this;
   if (!check_obj_alignment(obj)) return false;
-  if (!Universe::heap()->is_in_reserved(obj)) return false;
+  if (!GC::gc()->heap()->is_in_reserved(obj)) return false;
   // obj is aligned and accessible in heap
-  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;
+  if (GC::gc()->heap()->is_in_reserved(obj->klass_or_null())) return false;
 
   // Header verification: the mark is typically non-NULL. If we're
   // at a safepoint, it must not be null.
   // Outside of a safepoint, the header could be changing (for example,
   // another thread could be inflating a lock on this object).

@@ -569,22 +552,22 @@
 }
 
 #ifndef PRODUCT
 // used only for asserts
 bool oopDesc::is_unlocked_oop() const {
-  if (!Universe::heap()->is_in_reserved(this)) return false;
+  if (!GC::gc()->heap()->is_in_reserved(this)) return false;
   return mark()->is_unlocked();
 }
 #endif // PRODUCT
 
 // Used only for markSweep, scavenging
 bool oopDesc::is_gc_marked() const {
   return mark()->is_marked();
 }
 
 bool oopDesc::is_scavengable() const {
-  return Universe::heap()->is_scavengable(this);
+  return GC::gc()->heap()->is_scavengable(this);
 }
 
 // Used by scavengers
 bool oopDesc::is_forwarded() const {
   // The extra heap check is needed since the obj might be locked, in which case the

@@ -594,22 +577,22 @@
 
 // Used by scavengers
 void oopDesc::forward_to(oop p) {
   assert(check_obj_alignment(p),
          "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
+  assert(GC::gc()->heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);
   assert(m->decode_pointer() == p, "encoding must be reversable");
   set_mark(m);
 }
 
 // Used by parallel scavengers
 bool oopDesc::cas_forward_to(oop p, markOop compare) {
   assert(check_obj_alignment(p),
          "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
+  assert(GC::gc()->heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);
   assert(m->decode_pointer() == p, "encoding must be reversable");
   return cas_set_mark(m, compare) == compare;
 }
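The forwarding hunks above change only the heap-membership assert; the mark-word trick itself is untouched. For orientation: encode_pointer_as_mark() stores the forwarding target in the mark word by tagging an aligned pointer with the "marked" lock bits, which is why decode_pointer() can recover p and why both functions assert the round trip. A simplified, hedged sketch of that encoding follows; the authoritative version is markOopDesc in markOop.hpp, which also accounts for the biased-locking pattern.

  // Simplified sketch of the mark-word forwarding encoding; see markOop.hpp
  // for the real definitions (member functions, bias-pattern handling, etc.).
  static markOop encode_pointer_as_mark_sketch(void* p) {
    // Aligned pointers have free low bits, so OR in the "marked" value (3)
    // to flag the mark word as a forwarding pointer.
    return (markOop)((uintptr_t)p | 3 /* marked_value */);
  }

  static void* decode_pointer_sketch(markOop m) {
    // Mask the tag bits back off to recover the forwarding target.
    return (void*)((uintptr_t)m & ~(uintptr_t)3);
  }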