src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Old file (before the change):

  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/shared/adaptiveSizePolicy.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/suspendibleThreadSet.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/vmGCOperations.hpp"
  52 #include "gc/shared/weakProcessor.hpp"
  53 #include "logging/log.hpp"
  54 #include "memory/allocation.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "runtime/atomic.hpp"
  58 #include "runtime/handles.inline.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/prefetch.inline.hpp"
  61 #include "services/memTracker.hpp"
  62 #include "utilities/align.hpp"
  63 #include "utilities/growableArray.hpp"
  64 
  65 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  66   assert(addr < _cm->finger(), "invariant");
  67   assert(addr >= _task->finger(), "invariant");
  68 
  69   // We move that task's local finger along.
  70   _task->move_finger_to(addr);
  71 
  72   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  73   // we only partially drain the local queue and global stack
  74   _task->drain_local_queue(true);
  75   _task->drain_global_stack(true);


1351 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1352   G1ConcurrentMark* _cm;
1353   G1CMTask*         _task;
1354   int               _ref_counter_limit;
1355   int               _ref_counter;
1356   bool              _is_serial;
1357  public:
1358   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1359     _cm(cm), _task(task), _is_serial(is_serial),
1360     _ref_counter_limit(G1RefProcDrainInterval) {
1361     assert(_ref_counter_limit > 0, "sanity");
1362     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1363     _ref_counter = _ref_counter_limit;
1364   }
1365 
1366   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1367   virtual void do_oop(      oop* p) { do_oop_work(p); }
1368 
1369   template <class T> void do_oop_work(T* p) {
1370     if (!_cm->has_overflown()) {
1371       oop obj = oopDesc::load_decode_heap_oop(p);
1372       _task->deal_with_reference(obj);
1373       _ref_counter--;
1374 
1375       if (_ref_counter == 0) {
1376         // We have dealt with _ref_counter_limit references, pushing them
1377         // and objects reachable from them on to the local stack (and
1378         // possibly the global stack). Call G1CMTask::do_marking_step() to
1379         // process these entries.
1380         //
1381         // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1382         // there's nothing more to do (i.e. we're done with the entries that
1383         // were pushed as a result of the G1CMTask::deal_with_reference() calls
1384         // above) or we overflow.
1385         //
1386         // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1387         // flag while there may still be some work to do. (See the comment at
1388         // the beginning of G1CMTask::do_marking_step() for those conditions -
1389         // one of which is reaching the specified time target.) It is only
1390         // when G1CMTask::do_marking_step() returns without setting the
1391         // has_aborted() flag that the marking step has completed.

New file (after the change):

  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/shared/adaptiveSizePolicy.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/suspendibleThreadSet.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/vmGCOperations.hpp"
  52 #include "gc/shared/weakProcessor.hpp"
  53 #include "logging/log.hpp"
  54 #include "memory/allocation.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "oops/access.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/prefetch.inline.hpp"
  62 #include "services/memTracker.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/growableArray.hpp"
  65 
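The only change in this hunk is the new oops/access.inline.hpp include at line 56; it supplies the Access API used further down (line 1372). A minimal sketch of the two call forms, using only calls that appear in this webrev; the helper names load_ref_before/load_ref_after are made up for illustration and are not part of the patch:

// Sketch only, not part of the change itself.
template <class T>
oop load_ref_before(T* p) {
  return oopDesc::load_decode_heap_oop(p);   // old API: decode a (possibly narrow) oop directly
}
template <class T>
oop load_ref_after(T* p) {
  return RawAccess<>::oop_load(p);           // new Access API: raw load, no GC barriers
}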
  66 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  67   assert(addr < _cm->finger(), "invariant");
  68   assert(addr >= _task->finger(), "invariant");
  69 
  70   // We move that task's local finger along.
  71   _task->move_finger_to(addr);
  72 
  73   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  74   // we only partially drain the local queue and global stack
  75   _task->drain_local_queue(true);
  76   _task->drain_global_stack(true);
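The comment above notes that the closure only partially drains the local queue and global stack; the boolean passed to drain_local_queue()/drain_global_stack() selects that mode. A rough sketch of the idea, assuming the partial drain stops once the queue falls below some target size; the names drain_sketch, queue, and the bound of 64 are illustrative, not the HotSpot code:

// Illustrative sketch of a partial vs. full drain; not the actual implementation.
void drain_sketch(bool partially) {
  size_t target_size = partially ? 64 : 0;     // hypothetical bound; 0 means drain completely
  G1TaskQueueEntry entry;
  while (queue.size() > target_size && queue.pop(entry)) {
    scan_task_entry(entry);                    // may push more entries, so re-check the bound
  }
}

Draining only partially here keeps marking work on the queues so that other tasks can still steal it.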


1352 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1353   G1ConcurrentMark* _cm;
1354   G1CMTask*         _task;
1355   int               _ref_counter_limit;
1356   int               _ref_counter;
1357   bool              _is_serial;
1358  public:
1359   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1360     _cm(cm), _task(task), _is_serial(is_serial),
1361     _ref_counter_limit(G1RefProcDrainInterval) {
1362     assert(_ref_counter_limit > 0, "sanity");
1363     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1364     _ref_counter = _ref_counter_limit;
1365   }
1366 
1367   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1368   virtual void do_oop(      oop* p) { do_oop_work(p); }
1369 
1370   template <class T> void do_oop_work(T* p) {
1371     if (!_cm->has_overflown()) {
1372       oop obj = RawAccess<>::oop_load(p);
1373       _task->deal_with_reference(obj);
1374       _ref_counter--;
1375 
1376       if (_ref_counter == 0) {
1377         // We have dealt with _ref_counter_limit references, pushing them
1378         // and objects reachable from them on to the local stack (and
1379         // possibly the global stack). Call G1CMTask::do_marking_step() to
1380         // process these entries.
1381         //
1382         // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1383         // there's nothing more to do (i.e. we're done with the entries that
1384         // were pushed as a result of the G1CMTask::deal_with_reference() calls
1385         // above) or we overflow.
1386         //
1387         // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1388         // flag while there may still be some work to do. (See the comment at
1389         // the beginning of G1CMTask::do_marking_step() for those conditions -
1390         // one of which is reaching the specified time target.) It is only
1391         // when G1CMTask::do_marking_step() returns without setting the
1392         // has_aborted() flag that the marking step has completed.
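The comment block describes the shape of the drain loop that follows it in the full file: do_marking_step() is called repeatedly until the entries pushed by the deal_with_reference() calls are exhausted or the global stack overflows. A hedged sketch of such a loop; the step-duration flag and the argument order are assumptions based on the surrounding code, not copied from this webrev:

// Sketch of the drain loop the comment describes; not the literal patch content.
do {
  // Process the entries pushed above, bounded by a time target so that
  // reference processing stays responsive.
  _task->do_marking_step(G1ConcMarkStepDurationMillis,
                         false /* do_termination */,
                         _is_serial);
} while (_task->has_aborted() && !_cm->has_overflown());
// Reset the counter so the next _ref_counter_limit references trigger another drain.
_ref_counter = _ref_counter_limit;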

