src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

@@ -181,11 +181,11 @@
 
 // This struct contains per-thread things necessary to support parallel
 // young-gen collection.
 class CMSParGCThreadState: public CHeapObj<mtGC> {
  public:
-  CFLS_LAB lab;
+  CompactibleFreeListSpaceLAB lab;
   PromotionInfo promo;
 
   // Constructor.
   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
     promo.setSpace(cfls);

@@ -1108,11 +1108,11 @@
   // hip and should be fixed by untying them.
 }
 
 bool CMSCollector::shouldConcurrentCollect() {
   if (_full_gc_requested) {
-    log_trace(gc)("CMSCollector: collect because of explicit  gc request (or gc_locker)");
+    log_trace(gc)("CMSCollector: collect because of explicit  gc request (or GCLocker)");
     return true;
   }
 
   FreelistLocker x(this);
   // ------------------------------------------------------------------

@@ -1267,16 +1267,16 @@
                            size_t size,
                            bool   tlab)
 {
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
-  // assert(!GC_locker.is_active(), "Can't be called otherwise");
+  // assert(!GCLocker.is_active(), "Can't be called otherwise");
   // But I am not placing that assert here to allow future
   // generality in invoking this interface.
-  if (GC_locker::is_active()) {
-    // A consistency test for GC_locker
-    assert(GC_locker::needs_gc(), "Should have been set already");
+  if (GCLocker::is_active()) {
+    // A consistency test for GCLocker
+    assert(GCLocker::needs_gc(), "Should have been set already");
     // Skip this foreground collection, instead
     // expanding the heap if necessary.
     // Need the free list locks for the call to free() in compute_new_size()
     compute_new_size();
     return;

@@ -3270,11 +3270,11 @@
       if (prev_obj < span.end()) {
         MemRegion my_span = MemRegion(prev_obj, span.end());
         // Do the marking work within a non-empty span --
         // the last argument to the constructor indicates whether the
         // iteration should be incremental with periodic yields.
-        Par_MarkFromRootsClosure cl(this, _collector, my_span,
+        ParMarkFromRootsClosure cl(this, _collector, my_span,
                                     &_collector->_markBitMap,
                                     work_queue(i),
                                     &_collector->_markStack);
         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
       } // else nothing to do for this task

@@ -3289,22 +3289,22 @@
   // have been bumped up by the thread that claimed the last
   // task.
   pst->all_tasks_completed();
 }
 
-class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
+class ParConcMarkingClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   CMSConcMarkingTask* _task;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
   OopTaskQueue* _work_queue;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
+  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
     MetadataAwareOopClosure(collector->ref_processor()),
     _collector(collector),
     _task(task),
     _span(collector->_span),

@@ -3328,11 +3328,11 @@
 // the salient assumption here is that any references
 // that are in these stolen objects being scanned must
 // already have been initialized (else they would not have
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
-void Par_ConcMarkingClosure::do_oop(oop obj) {
+void ParConcMarkingClosure::do_oop(oop obj) {
   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {

@@ -3364,14 +3364,14 @@
     } // Else, some other thread got there first
     do_yield_check();
   }
 }
 
-void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
-void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
+void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
 
-void Par_ConcMarkingClosure::trim_queue(size_t max) {
+void ParConcMarkingClosure::trim_queue(size_t max) {
   while (_work_queue->size() > max) {
     oop new_oop;
     if (_work_queue->pop_local(new_oop)) {
       assert(new_oop->is_oop(), "Should be an oop");
       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");

@@ -3383,11 +3383,11 @@
 }
 
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
-void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
+void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
   // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded

@@ -3402,11 +3402,11 @@
   OopTaskQueue* work_q = work_queue(i);
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
   int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
+  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
   while (true) {
     cl.trim_queue(0);
     assert(work_q->size() == 0, "Should have been emptied above");
     if (get_work_from_overflow_stack(ovflw, work_q)) {
       // Can't assert below because the work obtained from the

@@ -4244,11 +4244,11 @@
   HandleMark   hm;
 
   // ---------- scan from roots --------------
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
+  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
 
   // ---------- young gen roots --------------
   {
     work_on_young_gen_roots(worker_id, &par_mri_cl);
     _timer.stop();

@@ -4310,14 +4310,14 @@
   void work(uint worker_id);
 
  private:
   // ... of  dirty cards in old space
   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
-                                  Par_MarkRefsIntoAndScanClosure* cl);
+                                  ParMarkRefsIntoAndScanClosure* cl);
 
   // ... work stealing for the above
-  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
+  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
 };
 
 class RemarkKlassClosure : public KlassClosure {
   KlassToOopClosure _cm_klass_closure;
  public:

@@ -4359,11 +4359,11 @@
   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
 }
 
 // work_queue(i) is passed to the closure
-// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
+// ParMarkRefsIntoAndScanClosure.  The "i" parameter
 // also is passed to do_dirty_card_rescan_tasks() and to
 // do_work_steal() to select the i-th task_queue.
 
 void CMSParRemarkTask::work(uint worker_id) {
   elapsedTimer _timer;

@@ -4371,11 +4371,11 @@
   HandleMark   hm;
 
   // ---------- rescan from roots --------------
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
+  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
     work_queue(worker_id));
 
   // Rescan young gen roots first since these are likely

@@ -4520,11 +4520,11 @@
 }
 
 void
 CMSParRemarkTask::do_dirty_card_rescan_tasks(
   CompactibleFreeListSpace* sp, int i,
-  Par_MarkRefsIntoAndScanClosure* cl) {
+  ParMarkRefsIntoAndScanClosure* cl) {
   // Until all tasks completed:
   // . claim an unclaimed task
   // . compute region boundaries corresponding to task claimed
   // . transfer dirty bits ct->mut for that region
   // . apply rescanclosure to dirty mut bits for that region

@@ -4612,11 +4612,11 @@
   pst->all_tasks_completed();  // declare that i am done
 }
 
 // . see if we can share work_queues with ParNew? XXX
 void
-CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
+CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
                                 int* seed) {
   OopTaskQueue* work_q = work_queue(i);
   NOT_PRODUCT(int num_steals = 0;)
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);

@@ -5830,31 +5830,31 @@
 }
 
 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
 
-Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
+ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
   MemRegion span, CMSBitMap* bitMap):
     _span(span),
     _bitMap(bitMap)
 {
   assert(ref_processor() == NULL, "deliberately left NULL");
   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
-void Par_MarkRefsIntoClosure::do_oop(oop obj) {
+void ParMarkRefsIntoClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
   assert(obj->is_oop(), "expected an oop");
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr)) {
     // this should be made more efficient
     _bitMap->par_mark(addr);
   }
 }
 
-void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
-void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
 
 // A variant of the above, used for CMS marking verification.
 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
     _span(span),

@@ -5987,22 +5987,22 @@
   _bit_map->lock()->lock_without_safepoint_check();
   _collector->startTimer();
 }
 
 ///////////////////////////////////////////////////////////
-// Par_MarkRefsIntoAndScanClosure: a parallel version of
+// ParMarkRefsIntoAndScanClosure: a parallel version of
 //                                 MarkRefsIntoAndScanClosure
 ///////////////////////////////////////////////////////////
-Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
+ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
   CMSBitMap* bit_map, OopTaskQueue* work_queue):
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _low_water_mark(MIN2((work_queue->max_elems()/4),
                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
-  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
+  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
 {
   // FIXME: Should initialize in base class constructor.
   assert(rp != NULL, "ref_processor shouldn't be NULL");
   set_ref_processor_internal(rp);
 }

@@ -6012,11 +6012,11 @@
 // the unmarked oops. The marks are made in the marking bit map and
 // the work_queue is used for keeping the (newly) grey objects during
 // the scan phase whence they are also available for stealing by parallel
 // threads. Since the marking bit map is shared, updates are
 // synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
   if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
     assert(obj->is_oop(true), "expected an oop");
     HeapWord* addr = (HeapWord*)obj;

@@ -6039,12 +6039,12 @@
       } // Else, another thread claimed the object
     }
   }
 }
 
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
-void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
+void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
 
 // This closure is used to rescan the marked objects on the dirty cards
 // in the mod union table and the card table proper.
 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
   oop p, MemRegion mr) {

@@ -6424,11 +6424,11 @@
     do_yield_check();
   }
   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
 }
 
-Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
+ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack*  overflow_stack):
   _collector(collector),

@@ -6447,11 +6447,11 @@
   assert(_span.contains(_finger), "Out of bounds _finger?");
 }
 
 // Should revisit to see if this should be restructured for
 // greater efficiency.
-bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
+bool ParMarkFromRootsClosure::do_bit(size_t offset) {
   if (_skip_bits > 0) {
     _skip_bits--;
     return true;
   }
   // convert offset into a HeapWord*

@@ -6472,11 +6472,11 @@
   }
   scan_oops_in_oop(addr);
   return true;
 }
 
-void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
+void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
   assert(_bit_map->isMarked(ptr), "expected bit to be set");
   // Should we assert that our work queue is empty or
   // below some drain limit?
   assert(_work_queue->size() == 0,
          "should drain stack to limit stack usage");

@@ -6522,11 +6522,11 @@
   }
 
   // Note: the local finger doesn't advance while we drain
   // the stack below, but the global finger sure can and will.
   HeapWord** gfa = _task->global_finger_addr();
-  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
+  ParPushOrMarkClosure pushOrMarkClosure(_collector,
                                       _span, _bit_map,
                                       _work_queue,
                                       _overflow_stack,
                                       _finger,
                                       gfa, this);

@@ -6555,11 +6555,11 @@
   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
 }
 
 // Yield in response to a request from VM Thread or
 // from mutators.
-void Par_MarkFromRootsClosure::do_yield_work() {
+void ParMarkFromRootsClosure::do_yield_work() {
   assert(_task != NULL, "sanity");
   _task->yield();
 }
 
 // A variant of the above used for verifying CMS marking work.

@@ -6682,18 +6682,18 @@
   _markStack(markStack),
   _finger(finger),
   _parent(parent)
 { }
 
-Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
+ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
                      MemRegion span,
                      CMSBitMap* bit_map,
                      OopTaskQueue* work_queue,
                      CMSMarkStack*  overflow_stack,
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
-                     Par_MarkFromRootsClosure* parent) :
+                                           ParMarkFromRootsClosure* parent) :
   MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),

@@ -6727,11 +6727,11 @@
 }
 
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
-void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
+void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
   // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded

@@ -6774,11 +6774,11 @@
 }
 
 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
 
-void Par_PushOrMarkClosure::do_oop(oop obj) {
+void ParPushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
   HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black

@@ -6820,12 +6820,12 @@
     }
     do_yield_check();
   }
 }
 
-void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
-void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
+void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
 
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
                                        ReferenceProcessor* rp,
                                        CMSBitMap* bit_map,

@@ -6898,11 +6898,11 @@
       }
     }
   }
 }
 
-Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
+ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
                                                MemRegion span,
                                                ReferenceProcessor* rp,
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue):
   MetadataAwareOopClosure(rp),

@@ -6917,11 +6917,11 @@
 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
 
 // Grey object rescan during second checkpoint phase --
 // the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop obj) {
+void ParPushAndMarkClosure::do_oop(oop obj) {
   // In the assert below, we ignore the mark word because
   // this oop may point to an already visited object that is
   // on the overflow stack (in which case the mark word has
   // been hijacked for chaining into the overflow stack --
   // if this is the last object in the overflow stack then

@@ -6957,12 +6957,12 @@
       }
     } // Else, some other thread got there first
   }
 }
 
-void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
-void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
+void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
 
 void CMSPrecleanRefsYieldClosure::do_yield_work() {
   Mutex* bml = _collector->bitMapLock();
   assert_lock_strong(bml);
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),