
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

rev 49290 : [mq]: JDK-8199735.01.patch

@@ -1039,11 +1039,11 @@
   // Otherwise, copy the object.  Here we must be careful to insert the
   // klass pointer last, since this marks the block as an allocated object.
   // Except with compressed oops it's the mark word.
   HeapWord* old_ptr = (HeapWord*)old;
   // Restore the mark word copied above.
-  obj->set_mark(m);
+  obj->set_mark_raw(m);
   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
   if (UseCompressedClassPointers) {
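
Reviewer note: the rename in this hunk reflects the split JDK-8199735 introduces between raw header accessors, used by GC-internal code that owns the object, and accessors that will eventually route through the Access API. A minimal standalone sketch of the raw side; FakeOop is a hypothetical stand-in, not HotSpot's real oopDesc layout:

    #include <atomic>
    #include <cstdint>

    using markWord = uintptr_t;

    // Hypothetical stand-in for oopDesc, for illustration only.
    struct FakeOop {
      std::atomic<markWord> _mark{0};

      // Raw accessors: plain header loads/stores with no barrier or
      // access-API interposition, for collector code that owns the object.
      markWord mark_raw() const      { return _mark.load(std::memory_order_relaxed); }
      void set_mark_raw(markWord m)  { _mark.store(m, std::memory_order_relaxed); }
    };

The storestore() above then orders the restored mark and the object body ahead of the klass-word store that publishes the object.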

@@ -7806,12 +7806,12 @@
   size_t i = num;
   oop  cur = _overflow_list;
   const markOop proto = markOopDesc::prototype();
   NOT_PRODUCT(ssize_t n = 0;)
   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark());
-    cur->set_mark(proto);   // until proven otherwise
+    next = oop(cur->mark_raw());
+    cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = stack->push(cur);
     assert(res, "Bit off more than can chew?");
     NOT_PRODUCT(n++;)
   }
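
The loop in this hunk walks a list threaded through object headers: each mark word holds the next oop, and popping an element restores the prototype header. A self-contained sketch of that walk, with a plain pointer standing in for the mark word:

    #include <cstddef>

    struct Node {
      Node* link = nullptr;   // plays the role of the mark word holding "next"
    };

    // Detach up to num nodes from the head, resetting each header to a
    // neutral value ("until proven otherwise") and handing the nodes to
    // the caller -- the shape of the stack->push(cur) loop above.
    // Caller must supply an out buffer with room for num pointers.
    size_t take_from_list(Node*& head, size_t num, Node** out) {
      size_t n = 0;
      Node* cur = head;
      while (n < num && cur != nullptr) {
        Node* next = cur->link;   // next = oop(cur->mark_raw())
        cur->link  = nullptr;     // cur->set_mark_raw(proto)
        out[n++]   = cur;
        cur = next;
      }
      head = cur;                 // anything not taken stays on the list
      return n;
    }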

@@ -7890,24 +7890,24 @@
   }
   assert(prefix != NULL && prefix != BUSY, "Error");
   size_t i = num;
   oop cur = prefix;
   // Walk down the first "num" objects, unless we reach the end.
-  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
-  if (cur->mark() == NULL) {
+  for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
+  if (cur->mark_raw() == NULL) {
     // We have "num" or fewer elements in the list, so there
     // is nothing to return to the global list.
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     // Chop off the suffix and return it to the global list.
-    assert(cur->mark() != BUSY, "Error");
-    oop suffix_head = cur->mark(); // suffix will be put back on global list
-    cur->set_mark(NULL);           // break off suffix
+    assert(cur->mark_raw() != BUSY, "Error");
+    oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
+    cur->set_mark_raw(NULL);           // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be
     // able to place back the suffix without incurring the cost
     // of a walk down the list.
     oop observed_overflow_list = _overflow_list;
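
When the popped prefix turns out to be the whole list, the code above must swap the BUSY sentinel it parked in _overflow_list back to NULL, but only if no other thread pushed in the meantime. The same guard with std::atomic, reusing Node from the sketch above; the sentinel value here is an assumption of the sketch:

    #include <atomic>

    // Sentinel meaning "list temporarily claimed"; any non-oop bit
    // pattern works, mirroring the BUSY marker in the code above.
    static Node* const BUSY = reinterpret_cast<Node*>(0x1);

    // Write NULL back over BUSY iff the head is still the BUSY we
    // installed; if another thread pushed meanwhile, leave its elements.
    // Mirrors: Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY)
    void clear_busy_if_unchanged(std::atomic<Node*>& overflow_list) {
      Node* expected = BUSY;
      overflow_list.compare_exchange_strong(expected, nullptr);
    }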

@@ -7923,22 +7923,22 @@
     }
     if (!attached) {
       // Too bad, someone else sneaked in (at least) an element; we'll need
       // to do a splice. Find tail of suffix so we can prepend suffix to global
       // list.
-      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
+      for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
       oop suffix_tail = cur;
-      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
+      assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
              "Tautology");
       observed_overflow_list = _overflow_list;
       do {
         cur_overflow_list = observed_overflow_list;
         if (cur_overflow_list != BUSY) {
           // Do the splice ...
-          suffix_tail->set_mark(markOop(cur_overflow_list));
+          suffix_tail->set_mark_raw(markOop(cur_overflow_list));
         } else { // cur_overflow_list == BUSY
-          suffix_tail->set_mark(NULL);
+          suffix_tail->set_mark_raw(NULL);
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
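
The splice in this hunk is the classic lock-free prepend: link the suffix tail to whatever head was observed (or to NULL when the observed head is the BUSY sentinel, since the BUSY holder owns no elements) and retry the CAS until it sticks. A sketch reusing the Node and BUSY definitions above:

    // Prepend the detached chain starting at suffix_head back onto the
    // shared list, retrying until the CAS succeeds.
    void splice_back(std::atomic<Node*>& overflow_list, Node* suffix_head) {
      Node* tail = suffix_head;
      while (tail->link != nullptr) {   // walk down to the suffix tail
        tail = tail->link;
      }
      Node* observed = overflow_list.load();
      do {
        // suffix_tail->set_mark_raw(...) in the hunk above
        tail->link = (observed == BUSY) ? nullptr : observed;
      } while (!overflow_list.compare_exchange_weak(observed, suffix_head));
    }

On failure compare_exchange_weak reloads the observed head, so each retry re-links the tail before trying again, exactly as the do/while above re-derives cur_overflow_list.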

@@ -7950,12 +7950,12 @@
   assert(prefix != NULL, "control point invariant");
   const markOop proto = markOopDesc::prototype();
   oop next;
   NOT_PRODUCT(ssize_t n = 0;)
   for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark());
-    cur->set_mark(proto);   // until proven otherwise
+    next = oop(cur->mark_raw());
+    cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = work_q->push(cur);
     assert(res, "Bit off more than we can chew?");
     NOT_PRODUCT(n++;)
   }

@@ -7969,11 +7969,11 @@
 // Single-threaded
 void CMSCollector::push_on_overflow_list(oop p) {
   NOT_PRODUCT(_num_par_pushes++;)
   assert(oopDesc::is_oop(p), "Not an oop");
   preserve_mark_if_necessary(p);
-  p->set_mark((markOop)_overflow_list);
+  p->set_mark_raw((markOop)_overflow_list);
   _overflow_list = p;
 }
 
 // Multi-threaded; use CAS to prepend to overflow list
 void CMSCollector::par_push_on_overflow_list(oop p) {

@@ -7983,13 +7983,13 @@
   oop observed_overflow_list = _overflow_list;
   oop cur_overflow_list;
   do {
     cur_overflow_list = observed_overflow_list;
     if (cur_overflow_list != BUSY) {
-      p->set_mark(markOop(cur_overflow_list));
+      p->set_mark_raw(markOop(cur_overflow_list));
     } else {
-      p->set_mark(NULL);
+      p->set_mark_raw(NULL);
     }
     observed_overflow_list =
       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
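
Both push paths hang the old head off the pushed object's header; the single-threaded variant is a plain store, the multi-threaded one the same CAS retry loop as the splice earlier. A sketch, again reusing Node and BUSY from the sketches above:

    // Single-threaded: no contention, so a plain store-and-swing suffices.
    void push(Node*& overflow_list, Node* p) {
      p->link = overflow_list;    // p->set_mark_raw((markOop)_overflow_list)
      overflow_list = p;
    }

    // Multi-threaded: CAS-prepend, treating a BUSY head as an empty list.
    void par_push(std::atomic<Node*>& overflow_list, Node* p) {
      Node* observed = overflow_list.load();
      do {
        p->link = (observed == BUSY) ? nullptr : observed;
      } while (!overflow_list.compare_exchange_weak(observed, p));
    }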

@@ -8010,33 +8010,33 @@
 // failures where possible, thus, incrementally hardening the VM
 // in such low resource situations.
 void CMSCollector::preserve_mark_work(oop p, markOop m) {
   _preserved_oop_stack.push(p);
   _preserved_mark_stack.push(m);
-  assert(m == p->mark(), "Mark word changed");
+  assert(m == p->mark_raw(), "Mark word changed");
   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "bijection");
 }
 
 // Single threaded
 void CMSCollector::preserve_mark_if_necessary(oop p) {
-  markOop m = p->mark();
+  markOop m = p->mark_raw();
   if (m->must_be_preserved(p)) {
     preserve_mark_work(p, m);
   }
 }
 
 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
-  markOop m = p->mark();
+  markOop m = p->mark_raw();
   if (m->must_be_preserved(p)) {
     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
     // Even though we read the mark word without holding
     // the lock, we are assured that it will not change
     // because we "own" this oop, so no other thread can
     // be trying to push it on the overflow list; see
     // the assertion in preserve_mark_work() that checks
-    // that m == p->mark().
+    // that m == p->mark_raw().
     preserve_mark_work(p, m);
   }
 }
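
Because the header slot is clobbered by the list link, any mark that cannot be recomputed must be saved on side stacks first and restored once the overflow list is drained. A self-contained sketch; must_be_preserved() here is a crude stand-in for markOopDesc's real predicate, which also considers hash codes, lock state, and so on:

    #include <cassert>
    #include <cstdint>
    #include <stack>

    using markWord = uintptr_t;
    struct Obj { markWord mark = 0; };

    // Stand-in: treat any non-prototype (non-zero) header as preservable.
    static bool must_be_preserved(markWord m) { return m != 0; }

    struct MarkPreserver {
      std::stack<Obj*>     _oops;
      std::stack<markWord> _marks;

      void preserve_if_necessary(Obj* p) {
        markWord m = p->mark;                    // m = p->mark_raw()
        if (must_be_preserved(m)) {
          _oops.push(p);
          _marks.push(m);
          assert(m == p->mark && "Mark word changed");
          assert(_oops.size() == _marks.size() && "bijection");
        }
      }

      void restore_all() {                       // cf. the restore loop below
        while (!_oops.empty()) {
          Obj* p = _oops.top();   _oops.pop();
          p->mark = _marks.top(); _marks.pop(); // p->set_mark_raw(m)
        }
      }
    };

Popping both stacks in lockstep is what keeps the oop/mark bijection the asserts check.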
 
 // We should be able to do this multi-threaded,

@@ -8065,14 +8065,14 @@
 
   while (!_preserved_oop_stack.is_empty()) {
     oop p = _preserved_oop_stack.pop();
     assert(oopDesc::is_oop(p), "Should be an oop");
     assert(_span.contains(p), "oop should be in _span");
-    assert(p->mark() == markOopDesc::prototype(),
+    assert(p->mark_raw() == markOopDesc::prototype(),
            "Set when taken from overflow list");
     markOop m = _preserved_mark_stack.pop();
-    p->set_mark(m);
+    p->set_mark_raw(m);
   }
   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
          "stacks were cleared above");
 }
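
A hypothetical round-trip with the MarkPreserver sketch above, showing the save, clobber, restore sequence the preserve/restore hunks implement:

    int main() {
      Obj obj;
      obj.mark = 0x2a;                 // a header worth keeping (e.g. a hash)
      MarkPreserver mp;
      mp.preserve_if_necessary(&obj);
      obj.mark = 0;                    // clobbered while on the overflow list
      mp.restore_all();                // original header reinstated
      return obj.mark == 0x2a ? 0 : 1;
    }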
 