< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 48820 : [mq]: 8196602-heapregionclosure-renaming


 574 
// Destructor is intentionally unreachable: the single G1ConcurrentMark
// instance lives for the lifetime of the VM and is never deleted.
G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
 579 
 580 class G1ClearBitMapTask : public AbstractGangTask {
 581 public:
 582   static size_t chunk_size() { return M; }
 583 
 584 private:
 585   // Heap region closure used for clearing the given mark bitmap.
 586   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 587   private:
 588     G1CMBitMap* _bitmap;
 589     G1ConcurrentMark* _cm;
 590   public:
 591     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
 592     }
 593 
 594     virtual bool doHeapRegion(HeapRegion* r) {
 595       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 596 
 597       HeapWord* cur = r->bottom();
 598       HeapWord* const end = r->end();
 599 
 600       while (cur < end) {
 601         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 602         _bitmap->clear_range(mr);
 603 
 604         cur += chunk_size_in_words;
 605 
 606         // Abort iteration if after yielding the marking has been aborted.
 607         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 608           return true;
 609         }
 610         // Repeat the asserts from before the start of the closure. We will do them
 611         // as asserts here to minimize their overhead on the product. However, we
 612         // will have them as guarantees at the beginning / end of the bitmap
 613         // clearing to get some checking in the product.
 614         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");


 621   };
 622 
 623   G1ClearBitmapHRClosure _cl;
 624   HeapRegionClaimer _hr_claimer;
 625   bool _suspendible; // If the task is suspendible, workers must join the STS.
 626 
 627 public:
 628   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 629     AbstractGangTask("G1 Clear Bitmap"),
 630     _cl(bitmap, suspendible ? cm : NULL),
 631     _hr_claimer(n_workers),
 632     _suspendible(suspendible)
 633   { }
 634 
 635   void work(uint worker_id) {
 636     SuspendibleThreadSetJoiner sts_join(_suspendible);
 637     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 638   }
 639 
 640   bool is_complete() {
 641     return _cl.complete();
 642   }
 643 };
 644 
 645 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 646   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 647 
 648   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 649   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 650 
 651   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 652 
 653   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 654 
 655   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 656   workers->run_task(&cl, num_workers);
 657   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 658 }
 659 
 660 void G1ConcurrentMark::cleanup_for_next_mark() {
 661   // Make sure that the concurrent mark thread looks to still be in


 677     DEBUG_ONLY(verify_live_data_clear());
 678   }
 679 
 680   // Repeat the asserts from above.
 681   guarantee(cm_thread()->during_cycle(), "invariant");
 682   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 683 }
 684 
// Clear the previous marking bitmap without yielding; hence this may only be
// called at a safepoint (asserted below).
void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prev_mark_bitmap, workers, false);
}
 689 
 690 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 691   G1CMBitMap* _bitmap;
 692   bool _error;
 693  public:
 694   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 695   }
 696 
 697   virtual bool doHeapRegion(HeapRegion* r) {
 698     // This closure can be called concurrently to the mutator, so we must make sure
 699     // that the result of the getNextMarkedWordAddress() call is compared to the
 700     // value passed to it as limit to detect any found bits.
 701     // end never changes in G1.
 702     HeapWord* end = r->end();
 703     return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
 704   }
 705 };
 706 
// Returns true iff no bit is set in the next marking bitmap. The closure
// aborts at the first marked bit, so a complete iteration implies clear.
bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}
 712 
// Heap region closure that records the start of a marking cycle in every region.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;  // Never abort; visit all regions.
  }
};
 720 
 721 void G1ConcurrentMark::checkpoint_roots_initial_pre() {
 722   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 723 
 724   _has_aborted = false;
 725 
 726   // Initialize marking structures. This has to be done in a STW phase.
 727   reset();
 728 
 729   // For each region note start of marking.
 730   NoteStartOfMarkHRClosure startcl;
 731   g1h->heap_region_iterate(&startcl);
 732 }
 733 
 734 
 735 void G1ConcurrentMark::checkpoint_roots_initial_post() {


1077   FreeRegionList* _local_cleanup_list;
1078   uint _old_regions_removed;
1079   uint _humongous_regions_removed;
1080   HRRSCleanupTask* _hrrs_cleanup_task;
1081 
1082 public:
  // local_cleanup_list receives regions freed by this closure;
  // hrrs_cleanup_task accumulates per-worker remembered-set cleanup work.
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }
1092 
1093   size_t freed_bytes() { return _freed_bytes; }
1094   const uint old_regions_removed() { return _old_regions_removed; }
1095   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1096 
  // Per-region end-of-marking work: reset GC time stamps, note the end of
  // marking, and either free the region (if completely dead) or queue its
  // remembered set for cleanup.
  bool doHeapRegion(HeapRegion *hr) {
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // A used, non-young, non-archive region with no live bytes is all
    // garbage: reclaim it now and record the freed space.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      // Region survives; schedule remembered-set cleanup instead.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;  // Never abort the iteration.
  }
1117 };
1118 
1119 class G1ParNoteEndTask: public AbstractGangTask {
1120   friend class G1NoteEndOfConcMarkClosure;
1121 
1122 protected:
1123   G1CollectedHeap* _g1h;
1124   FreeRegionList* _cleanup_list;
1125   HeapRegionClaimer _hrclaimer;
1126 
1127 public:
  // n_workers sizes the heap region claimer that partitions the iteration.
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }
1131 
1132   void work(uint worker_id) {
1133     FreeRegionList local_cleanup_list("Local Cleanup List");
1134     HRRSCleanupTask hrrs_cleanup_task;
1135     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1136                                            &hrrs_cleanup_task);
1137     _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
1138     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1139 
1140     // Now update the lists
1141     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1142     {
1143       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1144       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1145 
1146       // If we iterate over the global cleanup list at the end of
1147       // cleanup to do this printing we will not guarantee to only
1148       // generate output for the newly-reclaimed regions (the list
1149       // might not be empty at the beginning of cleanup; we might
1150       // still be working on its previous contents). So we do the
1151       // printing here, before we append the new regions to the global
1152       // cleanup list.
1153 
1154       G1HRPrinter* hr_printer = _g1h->hr_printer();
1155       if (hr_printer->is_active()) {
1156         FreeRegionListIterator iter(&local_cleanup_list);
1157         while (iter.more_available()) {
1158           HeapRegion* hr = iter.get_next();


2905                           G1PPRL_DOUBLE_H_FORMAT
2906                           G1PPRL_BYTE_H_FORMAT
2907                           G1PPRL_BYTE_H_FORMAT,
2908                           "type", "address-range",
2909                           "used", "prev-live", "next-live", "gc-eff",
2910                           "remset", "code-roots");
2911   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2912                           G1PPRL_TYPE_H_FORMAT
2913                           G1PPRL_ADDR_BASE_H_FORMAT
2914                           G1PPRL_BYTE_H_FORMAT
2915                           G1PPRL_BYTE_H_FORMAT
2916                           G1PPRL_BYTE_H_FORMAT
2917                           G1PPRL_DOUBLE_H_FORMAT
2918                           G1PPRL_BYTE_H_FORMAT
2919                           G1PPRL_BYTE_H_FORMAT,
2920                           "", "",
2921                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2922                           "(bytes)", "(bytes)");
2923 }
2924 
2925 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
2926   const char* type       = r->get_type_str();
2927   HeapWord* bottom       = r->bottom();
2928   HeapWord* end          = r->end();
2929   size_t capacity_bytes  = r->capacity();
2930   size_t used_bytes      = r->used();
2931   size_t prev_live_bytes = r->live_bytes();
2932   size_t next_live_bytes = r->next_live_bytes();
2933   double gc_eff          = r->gc_efficiency();
2934   size_t remset_bytes    = r->rem_set()->mem_size();
2935   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
2936 
2937   _total_used_bytes      += used_bytes;
2938   _total_capacity_bytes  += capacity_bytes;
2939   _total_prev_live_bytes += prev_live_bytes;
2940   _total_next_live_bytes += next_live_bytes;
2941   _total_remset_bytes    += remset_bytes;
2942   _total_strong_code_roots_bytes += strong_code_roots_bytes;
2943 
2944   // Print a line for this particular region.
2945   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX




 574 
// Destructor is intentionally unreachable: the single G1ConcurrentMark
// instance lives for the lifetime of the VM and is never deleted.
G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
 579 
 580 class G1ClearBitMapTask : public AbstractGangTask {
 581 public:
 582   static size_t chunk_size() { return M; }
 583 
 584 private:
 585   // Heap region closure used for clearing the given mark bitmap.
 586   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 587   private:
 588     G1CMBitMap* _bitmap;
 589     G1ConcurrentMark* _cm;
 590   public:
 591     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
 592     }
 593 
 594     virtual bool do_heap_region(HeapRegion* r) {
 595       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 596 
 597       HeapWord* cur = r->bottom();
 598       HeapWord* const end = r->end();
 599 
 600       while (cur < end) {
 601         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 602         _bitmap->clear_range(mr);
 603 
 604         cur += chunk_size_in_words;
 605 
 606         // Abort iteration if after yielding the marking has been aborted.
 607         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 608           return true;
 609         }
 610         // Repeat the asserts from before the start of the closure. We will do them
 611         // as asserts here to minimize their overhead on the product. However, we
 612         // will have them as guarantees at the beginning / end of the bitmap
 613         // clearing to get some checking in the product.
 614         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");


 621   };
 622 
 623   G1ClearBitmapHRClosure _cl;
 624   HeapRegionClaimer _hr_claimer;
 625   bool _suspendible; // If the task is suspendible, workers must join the STS.
 626 
 627 public:
 628   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 629     AbstractGangTask("G1 Clear Bitmap"),
 630     _cl(bitmap, suspendible ? cm : NULL),
 631     _hr_claimer(n_workers),
 632     _suspendible(suspendible)
 633   { }
 634 
 635   void work(uint worker_id) {
 636     SuspendibleThreadSetJoiner sts_join(_suspendible);
 637     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 638   }
 639 
 640   bool is_complete() {
 641     return _cl.is_complete();
 642   }
 643 };
 644 
 645 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 646   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 647 
 648   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 649   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 650 
 651   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 652 
 653   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 654 
 655   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 656   workers->run_task(&cl, num_workers);
 657   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 658 }
 659 
 660 void G1ConcurrentMark::cleanup_for_next_mark() {
 661   // Make sure that the concurrent mark thread looks to still be in


 677     DEBUG_ONLY(verify_live_data_clear());
 678   }
 679 
 680   // Repeat the asserts from above.
 681   guarantee(cm_thread()->during_cycle(), "invariant");
 682   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 683 }
 684 
// Clear the previous marking bitmap without yielding; hence this may only be
// called at a safepoint (asserted below).
void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prev_mark_bitmap, workers, false);
}
 689 
 690 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 691   G1CMBitMap* _bitmap;
 692   bool _error;
 693  public:
 694   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 695   }
 696 
 697   virtual bool do_heap_region(HeapRegion* r) {
 698     // This closure can be called concurrently to the mutator, so we must make sure
 699     // that the result of the getNextMarkedWordAddress() call is compared to the
 700     // value passed to it as limit to detect any found bits.
 701     // end never changes in G1.
 702     HeapWord* end = r->end();
 703     return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
 704   }
 705 };
 706 
// Returns true iff no bit is set in the next marking bitmap. The closure
// aborts at the first marked bit, so a complete iteration implies clear.
bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.is_complete();
}
 712 
// Heap region closure that records the start of a marking cycle in every region.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;  // Never abort; visit all regions.
  }
};
 720 
 721 void G1ConcurrentMark::checkpoint_roots_initial_pre() {
 722   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 723 
 724   _has_aborted = false;
 725 
 726   // Initialize marking structures. This has to be done in a STW phase.
 727   reset();
 728 
 729   // For each region note start of marking.
 730   NoteStartOfMarkHRClosure startcl;
 731   g1h->heap_region_iterate(&startcl);
 732 }
 733 
 734 
 735 void G1ConcurrentMark::checkpoint_roots_initial_post() {


1077   FreeRegionList* _local_cleanup_list;
1078   uint _old_regions_removed;
1079   uint _humongous_regions_removed;
1080   HRRSCleanupTask* _hrrs_cleanup_task;
1081 
1082 public:
  // local_cleanup_list receives regions freed by this closure;
  // hrrs_cleanup_task accumulates per-worker remembered-set cleanup work.
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }
1092 
1093   size_t freed_bytes() { return _freed_bytes; }
1094   const uint old_regions_removed() { return _old_regions_removed; }
1095   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1096 
  // Per-region end-of-marking work: reset GC time stamps, note the end of
  // marking, and either free the region (if completely dead) or queue its
  // remembered set for cleanup.
  bool do_heap_region(HeapRegion *hr) {
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // A used, non-young, non-archive region with no live bytes is all
    // garbage: reclaim it now and record the freed space.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      // Region survives; schedule remembered-set cleanup instead.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;  // Never abort the iteration.
  }
1117 };
1118 
1119 class G1ParNoteEndTask: public AbstractGangTask {
1120   friend class G1NoteEndOfConcMarkClosure;
1121 
1122 protected:
1123   G1CollectedHeap* _g1h;
1124   FreeRegionList* _cleanup_list;
1125   HeapRegionClaimer _hrclaimer;
1126 
1127 public:
  // n_workers sizes the heap region claimer that partitions the iteration.
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }
1131 
1132   void work(uint worker_id) {
1133     FreeRegionList local_cleanup_list("Local Cleanup List");
1134     HRRSCleanupTask hrrs_cleanup_task;
1135     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1136                                            &hrrs_cleanup_task);
1137     _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
1138     assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
1139 
1140     // Now update the lists
1141     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1142     {
1143       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1144       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1145 
1146       // If we iterate over the global cleanup list at the end of
1147       // cleanup to do this printing we will not guarantee to only
1148       // generate output for the newly-reclaimed regions (the list
1149       // might not be empty at the beginning of cleanup; we might
1150       // still be working on its previous contents). So we do the
1151       // printing here, before we append the new regions to the global
1152       // cleanup list.
1153 
1154       G1HRPrinter* hr_printer = _g1h->hr_printer();
1155       if (hr_printer->is_active()) {
1156         FreeRegionListIterator iter(&local_cleanup_list);
1157         while (iter.more_available()) {
1158           HeapRegion* hr = iter.get_next();


2905                           G1PPRL_DOUBLE_H_FORMAT
2906                           G1PPRL_BYTE_H_FORMAT
2907                           G1PPRL_BYTE_H_FORMAT,
2908                           "type", "address-range",
2909                           "used", "prev-live", "next-live", "gc-eff",
2910                           "remset", "code-roots");
2911   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2912                           G1PPRL_TYPE_H_FORMAT
2913                           G1PPRL_ADDR_BASE_H_FORMAT
2914                           G1PPRL_BYTE_H_FORMAT
2915                           G1PPRL_BYTE_H_FORMAT
2916                           G1PPRL_BYTE_H_FORMAT
2917                           G1PPRL_DOUBLE_H_FORMAT
2918                           G1PPRL_BYTE_H_FORMAT
2919                           G1PPRL_BYTE_H_FORMAT,
2920                           "", "",
2921                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2922                           "(bytes)", "(bytes)");
2923 }
2924 
2925 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2926   const char* type       = r->get_type_str();
2927   HeapWord* bottom       = r->bottom();
2928   HeapWord* end          = r->end();
2929   size_t capacity_bytes  = r->capacity();
2930   size_t used_bytes      = r->used();
2931   size_t prev_live_bytes = r->live_bytes();
2932   size_t next_live_bytes = r->next_live_bytes();
2933   double gc_eff          = r->gc_efficiency();
2934   size_t remset_bytes    = r->rem_set()->mem_size();
2935   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
2936 
2937   _total_used_bytes      += used_bytes;
2938   _total_capacity_bytes  += capacity_bytes;
2939   _total_prev_live_bytes += prev_live_bytes;
2940   _total_next_live_bytes += next_live_bytes;
2941   _total_remset_bytes    += remset_bytes;
2942   _total_strong_code_roots_bytes += strong_code_roots_bytes;
2943 
2944   // Print a line for this particular region.
2945   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX


< prev index next >