< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 49484 : imported patch 8197573-remove-secondary-free-list
rev 49485 : imported patch 8197573-stefanj-review2
rev 49493 : imported patch 8199326-remove-gc-time-stamp-logic-only
rev 49494 : imported patch 8199742-collectorstate-fixes
rev 49496 : imported patch 8151171-renamings
rev 49498 : imported patch 8200234-g1concurrentmark-refactorings
rev 49499 : [mq]: 8200234-stefanj-review


#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"
  64 
  65 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  66   assert(addr < _cm->finger(), "invariant");
  67   assert(addr >= _task->finger(), "invariant");
  68 
  69   // We move that task's local finger along.
  70   _task->move_finger_to(addr);
  71 


 519   _top_at_rebuild_starts[region_idx] = NULL;
 520   _region_mark_stats[region_idx].clear();
 521 }
 522 
 523 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 524   uint const region_idx = r->hrm_index();
 525   if (r->is_humongous()) {
 526     assert(r->is_starts_humongous(), "Got humongous continues region here");
 527     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 528     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 529       clear_statistics_in_region(j);
 530     }
 531   } else {
 532     clear_statistics_in_region(region_idx);
 533   } 
 534 }
 535 
 536 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 537   assert_at_safepoint_on_vm_thread();
 538 
 539   G1CMBitMap* const bitmap = _g1h->collector_state()->mark_or_rebuild_in_progress() ? _next_mark_bitmap : _prev_mark_bitmap;
 540   // Need to clear mark bit of the humongous object if already set and during a marking cycle.
 541   if (bitmap->is_marked(r->bottom())) {
 542     bitmap->clear(r->bottom());
 543   }
 544 
 545   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 546     return;
 547   }
 548 
 549   // Clear any statistics about the region gathered so far.
 550   clear_statistics(r);
 551 }
 552 
 553 void G1ConcurrentMark::reset_marking_for_restart() {
 554   _global_mark_stack.set_empty();
 555 
 556   // Expand the marking stack, if we have to and if we can.
 557   if (has_overflown()) {
 558     _global_mark_stack.expand();
 559 
 560     uint max_regions = _g1h->max_regions();
 561     for (uint i = 0; i < max_regions; i++) {
 562       _region_mark_stats[i].clear_during_overflow();


 990 
 991   // Parallel task terminator is set in "set_concurrency_and_phase()"
 992   set_concurrency_and_phase(active_workers, true /* concurrent */);
 993 
 994   G1CMConcurrentMarkingTask marking_task(this);
 995   _concurrent_workers->run_task(&marking_task);
 996   print_stats();
 997 }
 998 
 999 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1000   G1HeapVerifier* verifier = _g1h->verifier();
1001 
1002   verifier->verify_region_sets_optional();
1003 
1004   if (VerifyDuringGC) {
1005     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
1006 
1007     size_t const BufLen = 512;
1008     char buffer[BufLen];
1009 
1010     os::snprintf(buffer, BufLen, "During GC (%s)", caller);
1011     verifier->verify(type, vo, buffer);
1012   }
1013 
1014   verifier->check_bitmaps(caller);
1015 }
1016 
1017 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1018   G1CollectedHeap* _g1h;
1019   G1ConcurrentMark* _cm;
1020 
1021   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1022 
1023   void update_remset_before_rebuild(HeapRegion * hr) {
1024     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1025 
1026     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1027     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1028     if (selected_for_rebuild) {
1029       _num_regions_selected_for_rebuild++;
1030     }




  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1OopClosures.inline.hpp"
  34 #include "gc/g1/g1Policy.hpp"
  35 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  36 #include "gc/g1/g1StringDedup.hpp"
  37 #include "gc/g1/heapRegion.inline.hpp"
  38 #include "gc/g1/heapRegionRemSet.hpp"
  39 #include "gc/g1/heapRegionSet.inline.hpp"
  40 #include "gc/shared/adaptiveSizePolicy.hpp"
  41 #include "gc/shared/gcId.hpp"
  42 #include "gc/shared/gcTimer.hpp"
  43 #include "gc/shared/gcTrace.hpp"
  44 #include "gc/shared/gcTraceTime.inline.hpp"
  45 #include "gc/shared/genOopClosures.inline.hpp"
  46 #include "gc/shared/referencePolicy.hpp"
  47 #include "gc/shared/strongRootsScope.hpp"
  48 #include "gc/shared/suspendibleThreadSet.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "gc/shared/weakProcessor.hpp"
  52 #include "include/jvm.h"
  53 #include "logging/log.hpp"
  54 #include "memory/allocation.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "oops/access.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/prefetch.inline.hpp"
  62 #include "services/memTracker.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/growableArray.hpp"
  65 
  66 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  67   assert(addr < _cm->finger(), "invariant");
  68   assert(addr >= _task->finger(), "invariant");
  69 
  70   // We move that task's local finger along.
  71   _task->move_finger_to(addr);
  72 


 520   _top_at_rebuild_starts[region_idx] = NULL;
 521   _region_mark_stats[region_idx].clear();
 522 }
 523 
 524 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 525   uint const region_idx = r->hrm_index();
 526   if (r->is_humongous()) {
 527     assert(r->is_starts_humongous(), "Got humongous continues region here");
 528     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 529     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 530       clear_statistics_in_region(j);
 531     }
 532   } else {
 533     clear_statistics_in_region(region_idx);
 534   } 
 535 }
 536 
 537 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 538   assert_at_safepoint_on_vm_thread();
 539 
 540   // Need to clear mark bit of the humongous object.
 541   if (_next_mark_bitmap->is_marked(r->bottom())) {
 542     _next_mark_bitmap->clear(r->bottom());

 543   }
 544 
 545   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 546     return;
 547   }
 548 
 549   // Clear any statistics about the region gathered so far.
 550   clear_statistics(r);
 551 }
 552 
 553 void G1ConcurrentMark::reset_marking_for_restart() {
 554   _global_mark_stack.set_empty();
 555 
 556   // Expand the marking stack, if we have to and if we can.
 557   if (has_overflown()) {
 558     _global_mark_stack.expand();
 559 
 560     uint max_regions = _g1h->max_regions();
 561     for (uint i = 0; i < max_regions; i++) {
 562       _region_mark_stats[i].clear_during_overflow();


 990 
 991   // Parallel task terminator is set in "set_concurrency_and_phase()"
 992   set_concurrency_and_phase(active_workers, true /* concurrent */);
 993 
 994   G1CMConcurrentMarkingTask marking_task(this);
 995   _concurrent_workers->run_task(&marking_task);
 996   print_stats();
 997 }
 998 
 999 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1000   G1HeapVerifier* verifier = _g1h->verifier();
1001 
1002   verifier->verify_region_sets_optional();
1003 
1004   if (VerifyDuringGC) {
1005     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
1006 
1007     size_t const BufLen = 512;
1008     char buffer[BufLen];
1009 
1010     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1011     verifier->verify(type, vo, buffer);
1012   }
1013 
1014   verifier->check_bitmaps(caller);
1015 }
1016 
1017 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1018   G1CollectedHeap* _g1h;
1019   G1ConcurrentMark* _cm;
1020 
1021   uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1022 
1023   void update_remset_before_rebuild(HeapRegion * hr) {
1024     G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1025 
1026     size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1027     bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1028     if (selected_for_rebuild) {
1029       _num_regions_selected_for_rebuild++;
1030     }


< prev index next >