
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

rev 49290 : [mq]: JDK-8199735.01.patch

--- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
1024   }
1025   assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
1026   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1027   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1028   if (obj_ptr == NULL) {
1029      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1030      if (obj_ptr == NULL) {
1031        return NULL;
1032      }
1033   }
1034   oop obj = oop(obj_ptr);
1035   OrderAccess::storestore();
1036   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1037   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1038   // IMPORTANT: See note on object initialization for CMS above.
1039   // Otherwise, copy the object.  Here we must be careful to insert the
1040   // klass pointer last, since this marks the block as an allocated object.
1041   // Except with compressed oops it's the mark word.
1042   HeapWord* old_ptr = (HeapWord*)old;
1043   // Restore the mark word copied above.
1044   obj->set_mark(m);
1045   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1046   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1047   OrderAccess::storestore();
1048 
1049   if (UseCompressedClassPointers) {
1050     // Copy gap missed by (aligned) header size calculation below
1051     obj->set_klass_gap(old->klass_gap());
1052   }
1053   if (word_sz > (size_t)oopDesc::header_size()) {
1054     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1055                                  obj_ptr + oopDesc::header_size(),
1056                                  word_sz - oopDesc::header_size());
1057   }
1058 
1059   // Now we can track the promoted object, if necessary.  We take care
1060   // to delay the transition from uninitialized to full object
1061   // (i.e., insertion of klass pointer) until after, so that it
1062   // atomically becomes a promoted object.
1063   if (promoInfo->tracking()) {
1064     promoInfo->track((PromotedObject*)obj, old->klass());

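The hunk above relies on a publication ordering: the mark word and body are written first, a storestore fence is issued, and the klass pointer is installed last, so a concurrent reader that sees a non-NULL klass is guaranteed to see a fully copied object. Below is a minimal free-standing model of that ordering, using std::atomic in place of HotSpot's OrderAccess; all names are illustrative, not HotSpot's.

#include <atomic>
#include <cstring>

struct Block {
  std::atomic<const void*> klass{nullptr};   // NULL => block still uninitialized
  std::atomic<unsigned long> mark{0};        // stand-in for the mark word
  char payload[64];
};

void publish_copy(Block* dst, const Block* src, const void* k) {
  dst->mark.store(src->mark.load(std::memory_order_relaxed),
                  std::memory_order_relaxed);                   // restore mark first
  std::memcpy(dst->payload, src->payload, sizeof dst->payload); // copy the body
  // The analogue of OrderAccess::storestore(): nothing above may be
  // reordered past the klass install below.
  std::atomic_thread_fence(std::memory_order_release);
  dst->klass.store(k, std::memory_order_release);               // publication point
}

bool looks_initialized(const Block* b) {
  // Readers pair this acquire with the release above, mirroring the
  // klass_or_null() == NULL checks in the asserts.
  return b->klass.load(std::memory_order_acquire) != nullptr;
}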

7791     return true;
7792   } else {
7793     return false;
7794   }
7795 }
7796 
7797 bool CMSCollector::par_simulate_overflow() {
7798   return simulate_overflow();
7799 }
7800 #endif
7801 
7802 // Single-threaded
7803 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7804   assert(stack->isEmpty(), "Expected precondition");
7805   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7806   size_t i = num;
7807   oop  cur = _overflow_list;
7808   const markOop proto = markOopDesc::prototype();
7809   NOT_PRODUCT(ssize_t n = 0;)
7810   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7811     next = oop(cur->mark());
7812     cur->set_mark(proto);   // until proven otherwise
7813     assert(oopDesc::is_oop(cur), "Should be an oop");
7814     bool res = stack->push(cur);
7815     assert(res, "Bit off more than can chew?");
7816     NOT_PRODUCT(n++;)
7817   }
7818   _overflow_list = cur;
7819 #ifndef PRODUCT
7820   assert(_num_par_pushes >= n, "Too many pops?");
7821   _num_par_pushes -= n;
7822 #endif
7823   return !stack->isEmpty();
7824 }
7825 
7826 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7827 // (MT-safe) Get a prefix of at most "num" from the list.
7828 // The overflow list is chained through the mark word of
7829 // each object in the list. We fetch the entire list,
7830 // break off a prefix of the right size and return the
7831 // remainder. If other threads try to take objects from
7832 // the overflow list at that time, they will wait for

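The comment above introduces the BUSY-sentinel discipline for the overflow list; par_push_on_overflow_list later in this file shows the push side in full. The following free-standing sketch (illustrative names, not CMS's) captures the two invariants the code depends on: pushers never link a node through the sentinel, and a taker that swaps in BUSY owns the detached chain until it CASes a remainder back.

#include <atomic>
#include <cstdint>

struct Node { Node* next; };
static Node* const BUSY_NODE = reinterpret_cast<Node*>(uintptr_t(0x1aff1aff));
static std::atomic<Node*> overflow_list{nullptr};

void par_push(Node* p) {
  Node* observed = overflow_list.load();
  for (;;) {
    // Treat BUSY as "empty": a node must never point at the sentinel.
    p->next = (observed == BUSY_NODE) ? nullptr : observed;
    if (overflow_list.compare_exchange_weak(observed, p)) break;
    // On failure, observed was refreshed to the current head; retry.
  }
}

Node* par_take_all() {
  // Detach the whole chain atomically, leaving the sentinel behind.
  Node* head = overflow_list.exchange(BUSY_NODE);
  if (head == BUSY_NODE) {
    return nullptr;   // another taker owns the list; it will restore it
  }
  // Hand the empty state back; if a pusher slipped in meanwhile, this
  // CAS fails harmlessly and the pushed nodes survive. (CMS would also
  // peel off a prefix and splice the suffix back; that loop is elided.)
  Node* expected = BUSY_NODE;
  overflow_list.compare_exchange_strong(expected, nullptr);
  return head;        // may be nullptr if the list was empty
}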

7875     }
7876   }
7877   // If the list was found to be empty, or we spun long
7878   // enough, we give up and return empty-handed. If we leave
7879   // the list in the BUSY state below, it must be the case that
7880   // some other thread holds the overflow list and will set it
7881   // to a non-BUSY state in the future.
7882   if (prefix == NULL || prefix == BUSY) {
7883      // Nothing to take or waited long enough
7884      if (prefix == NULL) {
7885        // Write back the NULL in case we overwrote it with BUSY above
7886        // and it is still the same value.
7887        Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7888      }
7889      return false;
7890   }
7891   assert(prefix != NULL && prefix != BUSY, "Error");
7892   size_t i = num;
7893   oop cur = prefix;
7894   // Walk down the first "num" objects, unless we reach the end.
7895   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7896   if (cur->mark() == NULL) {
7897     // We have "num" or fewer elements in the list, so there
7898     // is nothing to return to the global list.
7899     // Write back the NULL in lieu of the BUSY we wrote
7900     // above, if it is still the same value.
7901     if (_overflow_list == BUSY) {
7902       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7903     }
7904   } else {
7905     // Chop off the suffix and return it to the global list.
7906     assert(cur->mark() != BUSY, "Error");
7907     oop suffix_head = cur->mark(); // suffix will be put back on global list
7908     cur->set_mark(NULL);           // break off suffix
7909     // It's possible that the list is still in the empty(busy) state
7910     // we left it in a short while ago; in that case we may be
7911     // able to place back the suffix without incurring the cost
7912     // of a walk down the list.
7913     oop observed_overflow_list = _overflow_list;
7914     oop cur_overflow_list = observed_overflow_list;
7915     bool attached = false;
7916     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7917       observed_overflow_list =
7918         Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7919       if (cur_overflow_list == observed_overflow_list) {
7920         attached = true;
7921         break;
7922       } else cur_overflow_list = observed_overflow_list;
7923     }
7924     if (!attached) {
7925       // Too bad, someone else sneaked in (at least) an element; we'll need
7926       // to do a splice. Find tail of suffix so we can prepend suffix to global
7927       // list.
7928       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7929       oop suffix_tail = cur;
7930       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7931              "Tautology");
7932       observed_overflow_list = _overflow_list;
7933       do {
7934         cur_overflow_list = observed_overflow_list;
7935         if (cur_overflow_list != BUSY) {
7936           // Do the splice ...
7937           suffix_tail->set_mark(markOop(cur_overflow_list));
7938         } else { // cur_overflow_list == BUSY
7939           suffix_tail->set_mark(NULL);
7940         }
7941         // ... and try to place spliced list back on overflow_list ...
7942         observed_overflow_list =
7943           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7944       } while (cur_overflow_list != observed_overflow_list);
7945       // ... until we have succeeded in doing so.
7946     }
7947   }
7948 
7949   // Push the prefix elements on work_q
7950   assert(prefix != NULL, "control point invariant");
7951   const markOop proto = markOopDesc::prototype();
7952   oop next;
7953   NOT_PRODUCT(ssize_t n = 0;)
7954   for (cur = prefix; cur != NULL; cur = next) {
7955     next = oop(cur->mark());
7956     cur->set_mark(proto);   // until proven otherwise
7957     assert(oopDesc::is_oop(cur), "Should be an oop");
7958     bool res = work_q->push(cur);
7959     assert(res, "Bit off more than we can chew?");
7960     NOT_PRODUCT(n++;)
7961   }
7962 #ifndef PRODUCT
7963   assert(_num_par_pushes >= n, "Too many pops?");
7964   Atomic::sub(n, &_num_par_pushes);
7965 #endif
7966   return true;
7967 }
7968 
7969 // Single-threaded
7970 void CMSCollector::push_on_overflow_list(oop p) {
7971   NOT_PRODUCT(_num_par_pushes++;)
7972   assert(oopDesc::is_oop(p), "Not an oop");
7973   preserve_mark_if_necessary(p);
7974   p->set_mark((markOop)_overflow_list);
7975   _overflow_list = p;
7976 }
7977 
7978 // Multi-threaded; use CAS to prepend to overflow list
7979 void CMSCollector::par_push_on_overflow_list(oop p) {
7980   NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
7981   assert(oopDesc::is_oop(p), "Not an oop");
7982   par_preserve_mark_if_necessary(p);
7983   oop observed_overflow_list = _overflow_list;
7984   oop cur_overflow_list;
7985   do {
7986     cur_overflow_list = observed_overflow_list;
7987     if (cur_overflow_list != BUSY) {
7988       p->set_mark(markOop(cur_overflow_list));
7989     } else {
7990       p->set_mark(NULL);
7991     }
7992     observed_overflow_list =
7993       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
7994   } while (cur_overflow_list != observed_overflow_list);
7995 }
7996 #undef BUSY
7997 
7998 // Single threaded
7999 // General Note on GrowableArray: pushes may silently fail
8000 // because we are (temporarily) out of C-heap for expanding
8001 // the stack. The problem is quite ubiquitous and affects
8002 // a lot of code in the JVM. The prudent thing for GrowableArray
8003 // to do (for now) is to exit with an error. However, that may
8004 // be too draconian in some cases because the caller may be
8005 // able to recover without much harm. For such cases, we
8006 // should probably introduce a "soft_push" method which returns
8007 // an indication of success or failure with the assumption that
8008 // the caller may be able to recover from a failure; code in
8009 // the VM can then be changed, incrementally, to deal with such
8010 // failures where possible, thus, incrementally hardening the VM
8011 // in such low resource situations.
8012 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8013   _preserved_oop_stack.push(p);
8014   _preserved_mark_stack.push(m);
8015   assert(m == p->mark(), "Mark word changed");
8016   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8017          "bijection");
8018 }
8019 
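A hedged sketch of what the "soft_push" proposed in the note above might look like, kept deliberately free-standing since GrowableArray has no such method today. std::realloc stands in for the C-heap expansion that can fail, so this is only valid for trivially copyable element types.

#include <cstdlib>

template <typename E>   // E assumed trivially copyable, for realloc
class SoftStack {
  E*  _data = nullptr;
  int _len  = 0;
  int _cap  = 0;
public:
  // Returns false instead of exiting when expansion fails, so a caller
  // that can degrade gracefully (as the note suggests) gets the chance.
  bool soft_push(const E& e) {
    if (_len == _cap) {
      int new_cap = (_cap == 0) ? 8 : 2 * _cap;
      E* d = static_cast<E*>(std::realloc(_data, new_cap * sizeof(E)));
      if (d == nullptr) {
        return false;            // keep the old buffer; caller recovers
      }
      _data = d;
      _cap  = new_cap;
    }
    _data[_len++] = e;
    return true;
  }
  ~SoftStack() { std::free(_data); }
};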
8020 // Single threaded
8021 void CMSCollector::preserve_mark_if_necessary(oop p) {
8022   markOop m = p->mark();
8023   if (m->must_be_preserved(p)) {
8024     preserve_mark_work(p, m);
8025   }
8026 }
8027 
8028 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8029   markOop m = p->mark();
8030   if (m->must_be_preserved(p)) {
8031     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8032     // Even though we read the mark word without holding
8033     // the lock, we are assured that it will not change
8034     // because we "own" this oop, so no other thread can
8035     // be trying to push it on the overflow list; see
8036     // the assertion in preserve_mark_work() that checks
8037     // that m == p->mark().
8038     preserve_mark_work(p, m);
8039   }
8040 }
8041 
8042 // We should be able to do this multi-threaded,
8043 // a chunk of stack being a task (this is
8044 // correct because each oop only ever appears
8045 // once in the overflow list). However, it's
8046 // not very easy to completely overlap this with
8047 // other operations, so will generally not be done
8048 // until all work's been completed. Because we
8049 // expect the preserved oop stack (set) to be small,
8050 // it's probably fine to do this single-threaded.
8051 // We can explore cleverer concurrent/overlapped/parallel
8052 // processing of preserved marks if we feel the
8053 // need for this in the future. Stack overflow should
8054 // be so rare in practice and, when it happens, its
8055 // effect on performance so great that this will
8056 // likely just be in the noise anyway.
8057 void CMSCollector::restore_preserved_marks_if_any() {
8058   assert(SafepointSynchronize::is_at_safepoint(),
8059          "world should be stopped");
8060   assert(Thread::current()->is_ConcurrentGC_thread() ||
8061          Thread::current()->is_VM_thread(),
8062          "should be single-threaded");
8063   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8064          "bijection");
8065 
8066   while (!_preserved_oop_stack.is_empty()) {
8067     oop p = _preserved_oop_stack.pop();
8068     assert(oopDesc::is_oop(p), "Should be an oop");
8069     assert(_span.contains(p), "oop should be in _span");
8070     assert(p->mark() == markOopDesc::prototype(),
8071            "Set when taken from overflow list");
8072     markOop m = _preserved_mark_stack.pop();
8073     p->set_mark(m);
8074   }
8075   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8076          "stacks were cleared above");
8077 }
8078 
8079 #ifndef PRODUCT
8080 bool CMSCollector::no_preserved_marks() const {
8081   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8082 }
8083 #endif
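The preserve/restore functions above maintain a simple invariant: the oop stack and mark stack stay in lockstep (the "bijection" asserts) so that popping both re-associates each saved mark with its own object. A toy model of that pairing, with illustrative names:

#include <cassert>
#include <vector>

struct MarkSaver {
  std::vector<void*>         oops;
  std::vector<unsigned long> marks;

  void preserve(void* p, unsigned long m) {
    oops.push_back(p);
    marks.push_back(m);
    assert(oops.size() == marks.size() && "bijection");
  }

  // set_mark is whatever write-back the caller supplies, mirroring
  // p->set_mark(m) in restore_preserved_marks_if_any().
  template <typename SetMark>
  void restore_all(SetMark set_mark) {
    while (!oops.empty()) {
      set_mark(oops.back(), marks.back());
      oops.pop_back();
      marks.pop_back();
    }
    assert(marks.empty() && "stacks drained together");
  }
};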
8084 
8085 // Transfer some number of overflown objects to usual marking
8086 // stack. Return true if some objects were transferred.
8087 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8088   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8089                     (size_t)ParGCDesiredObjsFromOverflowList);
8090 
8091   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8092   assert(_collector->overflow_list_is_empty() || res,
8093          "If list is not empty, we should have taken something");



+++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
1024   }
1025   assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
1026   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1027   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1028   if (obj_ptr == NULL) {
1029      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1030      if (obj_ptr == NULL) {
1031        return NULL;
1032      }
1033   }
1034   oop obj = oop(obj_ptr);
1035   OrderAccess::storestore();
1036   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1037   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1038   // IMPORTANT: See note on object initialization for CMS above.
1039   // Otherwise, copy the object.  Here we must be careful to insert the
1040   // klass pointer last, since this marks the block as an allocated object.
1041   // Except with compressed oops it's the mark word.
1042   HeapWord* old_ptr = (HeapWord*)old;
1043   // Restore the mark word copied above.
1044   obj->set_mark_raw(m);
1045   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1046   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1047   OrderAccess::storestore();
1048 
1049   if (UseCompressedClassPointers) {
1050     // Copy gap missed by (aligned) header size calculation below
1051     obj->set_klass_gap(old->klass_gap());
1052   }
1053   if (word_sz > (size_t)oopDesc::header_size()) {
1054     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1055                                  obj_ptr + oopDesc::header_size(),
1056                                  word_sz - oopDesc::header_size());
1057   }
1058 
1059   // Now we can track the promoted object, if necessary.  We take care
1060   // to delay the transition from uninitialized to full object
1061   // (i.e., insertion of klass pointer) until after, so that it
1062   // atomically becomes a promoted object.
1063   if (promoInfo->tracking()) {
1064     promoInfo->track((PromotedObject*)obj, old->klass());

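The only substantive change in the patched hunks is mark()/set_mark() becoming mark_raw()/set_mark_raw(). My reading of the patch is that the "_raw" accessors are plain loads and stores for GC-internal code, leaving the unsuffixed accessors free to route through a barrier-aware access layer; the class below illustrates that split under this assumption and is not oopDesc's real interface.

#include <atomic>
#include <cstdint>

struct FakeOop {
  std::atomic<uintptr_t> _mark{0};

  // Raw accessors: direct loads/stores, for callers (like the CMS code
  // in this diff) that already coordinate access to the object.
  uintptr_t mark_raw() const     { return _mark.load(std::memory_order_relaxed); }
  void set_mark_raw(uintptr_t m) { _mark.store(m, std::memory_order_relaxed); }

  // Barrier-aware accessors would add a GC hook before delegating;
  // the hook itself is elided, as it is not the point of this sketch.
  uintptr_t mark() const         { /* GC access hook */ return mark_raw(); }
  void set_mark(uintptr_t m)     { /* GC access hook */ set_mark_raw(m); }
};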

7791     return true;
7792   } else {
7793     return false;
7794   }
7795 }
7796 
7797 bool CMSCollector::par_simulate_overflow() {
7798   return simulate_overflow();
7799 }
7800 #endif
7801 
7802 // Single-threaded
7803 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7804   assert(stack->isEmpty(), "Expected precondition");
7805   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7806   size_t i = num;
7807   oop  cur = _overflow_list;
7808   const markOop proto = markOopDesc::prototype();
7809   NOT_PRODUCT(ssize_t n = 0;)
7810   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7811     next = oop(cur->mark_raw());
7812     cur->set_mark_raw(proto);   // until proven otherwise
7813     assert(oopDesc::is_oop(cur), "Should be an oop");
7814     bool res = stack->push(cur);
7815     assert(res, "Bit off more than can chew?");
7816     NOT_PRODUCT(n++;)
7817   }
7818   _overflow_list = cur;
7819 #ifndef PRODUCT
7820   assert(_num_par_pushes >= n, "Too many pops?");
7821   _num_par_pushes -= n;
7822 #endif
7823   return !stack->isEmpty();
7824 }
7825 
7826 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7827 // (MT-safe) Get a prefix of at most "num" from the list.
7828 // The overflow list is chained through the mark word of
7829 // each object in the list. We fetch the entire list,
7830 // break off a prefix of the right size and return the
7831 // remainder. If other threads try to take objects from
7832 // the overflow list at that time, they will wait for


7875     }
7876   }
7877   // If the list was found to be empty, or we spun long
7878   // enough, we give up and return empty-handed. If we leave
7879   // the list in the BUSY state below, it must be the case that
7880   // some other thread holds the overflow list and will set it
7881   // to a non-BUSY state in the future.
7882   if (prefix == NULL || prefix == BUSY) {
7883      // Nothing to take or waited long enough
7884      if (prefix == NULL) {
7885        // Write back the NULL in case we overwrote it with BUSY above
7886        // and it is still the same value.
7887        Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7888      }
7889      return false;
7890   }
7891   assert(prefix != NULL && prefix != BUSY, "Error");
7892   size_t i = num;
7893   oop cur = prefix;
7894   // Walk down the first "num" objects, unless we reach the end.
7895   for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
7896   if (cur->mark_raw() == NULL) {
7897     // We have "num" or fewer elements in the list, so there
7898     // is nothing to return to the global list.
7899     // Write back the NULL in lieu of the BUSY we wrote
7900     // above, if it is still the same value.
7901     if (_overflow_list == BUSY) {
7902       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7903     }
7904   } else {
7905     // Chop off the suffix and return it to the global list.
7906     assert(cur->mark_raw() != BUSY, "Error");
7907     oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
7908     cur->set_mark_raw(NULL);           // break off suffix
7909     // It's possible that the list is still in the empty(busy) state
7910     // we left it in a short while ago; in that case we may be
7911     // able to place back the suffix without incurring the cost
7912     // of a walk down the list.
7913     oop observed_overflow_list = _overflow_list;
7914     oop cur_overflow_list = observed_overflow_list;
7915     bool attached = false;
7916     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7917       observed_overflow_list =
7918         Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7919       if (cur_overflow_list == observed_overflow_list) {
7920         attached = true;
7921         break;
7922       } else cur_overflow_list = observed_overflow_list;
7923     }
7924     if (!attached) {
7925       // Too bad, someone else sneaked in (at least) an element; we'll need
7926       // to do a splice. Find tail of suffix so we can prepend suffix to global
7927       // list.
7928       for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
7929       oop suffix_tail = cur;
7930       assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
7931              "Tautology");
7932       observed_overflow_list = _overflow_list;
7933       do {
7934         cur_overflow_list = observed_overflow_list;
7935         if (cur_overflow_list != BUSY) {
7936           // Do the splice ...
7937           suffix_tail->set_mark_raw(markOop(cur_overflow_list));
7938         } else { // cur_overflow_list == BUSY
7939           suffix_tail->set_mark_raw(NULL);
7940         }
7941         // ... and try to place spliced list back on overflow_list ...
7942         observed_overflow_list =
7943           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7944       } while (cur_overflow_list != observed_overflow_list);
7945       // ... until we have succeeded in doing so.
7946     }
7947   }
7948 
7949   // Push the prefix elements on work_q
7950   assert(prefix != NULL, "control point invariant");
7951   const markOop proto = markOopDesc::prototype();
7952   oop next;
7953   NOT_PRODUCT(ssize_t n = 0;)
7954   for (cur = prefix; cur != NULL; cur = next) {
7955     next = oop(cur->mark_raw());
7956     cur->set_mark_raw(proto);   // until proven otherwise
7957     assert(oopDesc::is_oop(cur), "Should be an oop");
7958     bool res = work_q->push(cur);
7959     assert(res, "Bit off more than we can chew?");
7960     NOT_PRODUCT(n++;)
7961   }
7962 #ifndef PRODUCT
7963   assert(_num_par_pushes >= n, "Too many pops?");
7964   Atomic::sub(n, &_num_par_pushes);
7965 #endif
7966   return true;
7967 }
7968 
7969 // Single-threaded
7970 void CMSCollector::push_on_overflow_list(oop p) {
7971   NOT_PRODUCT(_num_par_pushes++;)
7972   assert(oopDesc::is_oop(p), "Not an oop");
7973   preserve_mark_if_necessary(p);
7974   p->set_mark_raw((markOop)_overflow_list);
7975   _overflow_list = p;
7976 }
7977 
7978 // Multi-threaded; use CAS to prepend to overflow list
7979 void CMSCollector::par_push_on_overflow_list(oop p) {
7980   NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
7981   assert(oopDesc::is_oop(p), "Not an oop");
7982   par_preserve_mark_if_necessary(p);
7983   oop observed_overflow_list = _overflow_list;
7984   oop cur_overflow_list;
7985   do {
7986     cur_overflow_list = observed_overflow_list;
7987     if (cur_overflow_list != BUSY) {
7988       p->set_mark_raw(markOop(cur_overflow_list));
7989     } else {
7990       p->set_mark_raw(NULL);
7991     }
7992     observed_overflow_list =
7993       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
7994   } while (cur_overflow_list != observed_overflow_list);
7995 }
7996 #undef BUSY
7997 
7998 // Single threaded
7999 // General Note on GrowableArray: pushes may silently fail
8000 // because we are (temporarily) out of C-heap for expanding
8001 // the stack. The problem is quite ubiquitous and affects
8002 // a lot of code in the JVM. The prudent thing for GrowableArray
8003 // to do (for now) is to exit with an error. However, that may
8004 // be too draconian in some cases because the caller may be
8005 // able to recover without much harm. For such cases, we
8006 // should probably introduce a "soft_push" method which returns
8007 // an indication of success or failure with the assumption that
8008 // the caller may be able to recover from a failure; code in
8009 // the VM can then be changed, incrementally, to deal with such
8010 // failures where possible, thus, incrementally hardening the VM
8011 // in such low resource situations.
8012 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8013   _preserved_oop_stack.push(p);
8014   _preserved_mark_stack.push(m);
8015   assert(m == p->mark_raw(), "Mark word changed");
8016   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8017          "bijection");
8018 }
8019 
8020 // Single threaded
8021 void CMSCollector::preserve_mark_if_necessary(oop p) {
8022   markOop m = p->mark_raw();
8023   if (m->must_be_preserved(p)) {
8024     preserve_mark_work(p, m);
8025   }
8026 }
8027 
8028 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8029   markOop m = p->mark_raw();
8030   if (m->must_be_preserved(p)) {
8031     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8032     // Even though we read the mark word without holding
8033     // the lock, we are assured that it will not change
8034     // because we "own" this oop, so no other thread can
8035     // be trying to push it on the overflow list; see
8036     // the assertion in preserve_mark_work() that checks
8037     // that m == p->mark_raw().
8038     preserve_mark_work(p, m);
8039   }
8040 }
8041 
8042 // We should be able to do this multi-threaded,
8043 // a chunk of stack being a task (this is
8044 // correct because each oop only ever appears
8045 // once in the overflow list). However, it's
8046 // not very easy to completely overlap this with
8047 // other operations, so will generally not be done
8048 // until all work's been completed. Because we
8049 // expect the preserved oop stack (set) to be small,
8050 // it's probably fine to do this single-threaded.
8051 // We can explore cleverer concurrent/overlapped/parallel
8052 // processing of preserved marks if we feel the
8053 // need for this in the future. Stack overflow should
8054 // be so rare in practice and, when it happens, its
8055 // effect on performance so great that this will
8056 // likely just be in the noise anyway.
8057 void CMSCollector::restore_preserved_marks_if_any() {
8058   assert(SafepointSynchronize::is_at_safepoint(),
8059          "world should be stopped");
8060   assert(Thread::current()->is_ConcurrentGC_thread() ||
8061          Thread::current()->is_VM_thread(),
8062          "should be single-threaded");
8063   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8064          "bijection");
8065 
8066   while (!_preserved_oop_stack.is_empty()) {
8067     oop p = _preserved_oop_stack.pop();
8068     assert(oopDesc::is_oop(p), "Should be an oop");
8069     assert(_span.contains(p), "oop should be in _span");
8070     assert(p->mark_raw() == markOopDesc::prototype(),
8071            "Set when taken from overflow list");
8072     markOop m = _preserved_mark_stack.pop();
8073     p->set_mark_raw(m);
8074   }
8075   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8076          "stacks were cleared above");
8077 }
8078 
8079 #ifndef PRODUCT
8080 bool CMSCollector::no_preserved_marks() const {
8081   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8082 }
8083 #endif
8084 
8085 // Transfer some number of overflown objects to usual marking
8086 // stack. Return true if some objects were transferred.
8087 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8088   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8089                     (size_t)ParGCDesiredObjsFromOverflowList);
8090 
8091   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8092   assert(_collector->overflow_list_is_empty() || res,
8093          "If list is not empty, we should have taken something");

