
src/share/vm/gc/g1/g1ConcurrentMark.cpp

Old version:

  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/growableArray.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                  const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
  74   addr = align_ptr_up(addr, HeapWordSize << _shifter);
  75   size_t addrOffset = heapWordToOffset(addr);
  76   assert(limit != NULL, "limit must not be NULL");
  77   size_t limitOffset = heapWordToOffset(limit);
  78   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  79   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  80   assert(nextAddr >= addr, "get_next_one postcondition");
  81   assert(nextAddr == limit || isMarked(nextAddr),
  82          "get_next_one postcondition");
  83   return nextAddr;
  84 }
  85 
  86 #ifndef PRODUCT
  87 bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  88   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  89   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
  90          "size inconsistency");
  91   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
  92          _bmWordSize  == heap_rs.word_size();
  93 }
  94 #endif
  95 
  96 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  97   _bm.print_on_error(st, prefix);
  98 }
  99 
 100 size_t G1CMBitMap::compute_size(size_t heap_size) {
 101   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 102 }
 103 
 104 size_t G1CMBitMap::mark_distance() {
 105   return MinObjAlignmentInBytes * BitsPerByte;
 106 }
 107 
 108 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 109   _bmStartWord = heap.start();
 110   _bmWordSize = heap.word_size();
 111 
 112   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
 113 
 114   storage->set_mapping_changed_listener(&_listener);
 115 }
 116 
 117 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 118   if (zero_filled) {
 119     return;
 120   }
 121   // We need to clear the bitmap on commit, removing any existing information.


 153   if (_base != NULL) {
 154     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
 155   }
 156 
 157   _base = new_base;
 158   _chunk_capacity = new_capacity;
 159   set_empty();
 160 
 161   return true;
 162 }
 163 
 164 size_t G1CMMarkStack::capacity_alignment() {
 165   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 166 }
 167 
 168 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 169   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 170 
 171   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 172 
 173   _max_chunk_capacity = align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 174   size_t initial_chunk_capacity = align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 175 
 176   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 177             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 178             _max_chunk_capacity,
 179             initial_chunk_capacity);
 180 
 181   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 182                 initial_chunk_capacity, _max_chunk_capacity);
 183 
 184   return resize(initial_chunk_capacity);
 185 }
 186 
 187 void G1CMMarkStack::expand() {
 188   if (_chunk_capacity == _max_chunk_capacity) {
 189     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 190     return;
 191   }
 192   size_t old_capacity = _chunk_capacity;
 193   // Double capacity if possible
 194   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);


 697     AbstractGangTask("G1 Clear Bitmap"),
 698     _cl(bitmap, suspendible ? cm : NULL),
 699     _hr_claimer(n_workers),
 700     _suspendible(suspendible)
 701   { }
 702 
 703   void work(uint worker_id) {
 704     SuspendibleThreadSetJoiner sts_join(_suspendible);
 705     G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer);
 706   }
 707 
 708   bool is_complete() {
 709     return _cl.complete();
 710   }
 711 };
 712 
 713 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 714   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 715 
 716   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 717   size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 718 
 719   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 720 
 721   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 722 
 723   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 724   workers->run_task(&cl, num_workers);
 725   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 726 }
 727 
 728 void G1ConcurrentMark::cleanup_for_next_mark() {
 729   // Make sure that the concurrent mark thread looks to still be in
 730   // the current cycle.
 731   guarantee(cmThread()->during_cycle(), "invariant");
 732 
 733   // We are finishing up the current cycle by clearing the next
 734   // marking bitmap and getting it ready for the next cycle. During
 735   // this time no other cycle can start. So, let's make sure that this
 736   // is the case.
 737   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

New version:

  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/growableArray.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                  const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
  74   addr = align_up(addr, HeapWordSize << _shifter);
  75   size_t addrOffset = heapWordToOffset(addr);
  76   assert(limit != NULL, "limit must not be NULL");
  77   size_t limitOffset = heapWordToOffset(limit);
  78   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  79   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  80   assert(nextAddr >= addr, "get_next_one postcondition");
  81   assert(nextAddr == limit || isMarked(nextAddr),
  82          "get_next_one postcondition");
  83   return nextAddr;
  84 }
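
A note on the rounding step above: with the usual HeapWordSize of 8 and _shifter of 0, HeapWordSize << _shifter is 8, so addr is bumped up to the next 8-byte boundary before being translated into a bit offset. A minimal standalone sketch of that power-of-two align-up, assuming the classic (x + a - 1) & ~(a - 1) formulation (the helper name below is hypothetical, not HotSpot's):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for align_up(): round x up to the next
    // multiple of a power-of-two alignment a.
    static inline uintptr_t align_up_sketch(uintptr_t x, uintptr_t a) {
      assert((a & (a - 1)) == 0 && "alignment must be a power of two");
      return (x + a - 1) & ~(a - 1);
    }

    int main() {
      const uintptr_t boundary = 8;  // HeapWordSize << 0, assumed
      printf("%#lx\n", (unsigned long)align_up_sketch(0x1001, boundary)); // 0x1008
      printf("%#lx\n", (unsigned long)align_up_sketch(0x1008, boundary)); // 0x1008, already aligned
      return 0;
    }
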
  85 
  86 #ifndef PRODUCT
  87 bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  88   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  89   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
  90          "size inconsistency");
  91   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
  92          _bmWordSize  == heap_rs.word_size();
  93 }
  94 #endif
  95 
  96 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  97   _bm.print_on_error(st, prefix);
  98 }
  99 
 100 size_t G1CMBitMap::compute_size(size_t heap_size) {
 101   return ReservedSpace::allocation_align_up(heap_size / mark_distance());
 102 }
 103 
 104 size_t G1CMBitMap::mark_distance() {
 105   return MinObjAlignmentInBytes * BitsPerByte;
 106 }
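
For scale: with the common defaults MinObjAlignmentInBytes == 8 and BitsPerByte == 8, mark_distance() is 64, i.e. the bitmap spends one bit per possible 8-byte-aligned object start, or one bitmap byte per 64 heap bytes. A back-of-the-envelope sketch of compute_size() under those assumed constants:

    #include <cstdio>

    int main() {
      const unsigned long long min_obj_alignment_bytes = 8;  // assumed default
      const unsigned long long bits_per_byte = 8;
      const unsigned long long mark_distance =
          min_obj_alignment_bytes * bits_per_byte;           // 64

      const unsigned long long heap_size = 32ull << 30;      // 32 GB heap
      printf("bitmap: %llu MB\n",
             heap_size / mark_distance / (1024 * 1024));     // 512 MB per bitmap
      return 0;
    }
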
 107 
 108 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 109   _bmStartWord = heap.start();
 110   _bmWordSize = heap.word_size();
 111 
 112   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
 113 
 114   storage->set_mapping_changed_listener(&_listener);
 115 }
 116 
 117 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 118   if (zero_filled) {
 119     return;
 120   }
 121   // We need to clear the bitmap on commit, removing any existing information.


 153   if (_base != NULL) {
 154     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
 155   }
 156 
 157   _base = new_base;
 158   _chunk_capacity = new_capacity;
 159   set_empty();
 160 
 161   return true;
 162 }
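
The tail of resize() shown here follows the usual allocate-new / free-old / swap pattern: the replacement array is mapped before the old backing store (if any) is released, and only then are _base and _chunk_capacity updated and the stack reset to empty, so a failed allocation leaves the old stack intact. A minimal sketch of the same pattern with plain array new in place of MmapArrayAllocator (the names and the Chunk type are hypothetical):

    #include <cstddef>
    #include <new>

    struct Chunk { void* entries[128]; };   // stand-in for TaskQueueEntryChunk

    struct MarkStackSketch {
      Chunk* _base = nullptr;
      size_t _chunk_capacity = 0;

      bool resize(size_t new_capacity) {
        Chunk* new_base = new (std::nothrow) Chunk[new_capacity];
        if (new_base == nullptr) {
          return false;                     // allocation failed: keep the old storage
        }
        delete[] _base;                     // release the previous backing, if any
        _base = new_base;
        _chunk_capacity = new_capacity;
        return true;
      }
    };
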
 163 
 164 size_t G1CMMarkStack::capacity_alignment() {
 165   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 166 }
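
capacity_alignment() yields an alignment expressed in G1TaskQueueEntry units: the least common multiple of the OS allocation granularity and the chunk size, divided by the entry size, so any capacity aligned to it is simultaneously a whole number of chunks and a whole number of allocation-granularity units. A small sketch of the same computation with assumed sizes (4 KB granularity; the chunk and entry sizes below are hypothetical):

    #include <cstddef>
    #include <cstdio>
    #include <numeric>   // std::lcm, C++17

    int main() {
      const size_t vm_allocation_granularity = 4096;  // assumed 4 KB
      const size_t entry_size = sizeof(void*);        // G1TaskQueueEntry stand-in
      const size_t chunk_size = 129 * entry_size;     // hypothetical: 128 entries + next pointer

      const size_t alignment_in_entries =
          std::lcm(vm_allocation_granularity, chunk_size) / entry_size;
      printf("alignment: %zu entries\n", alignment_in_entries);
      return 0;
    }
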
 167 
 168 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 169   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 170 
 171   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 172 
 173   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 174   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 175 
 176   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 177             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 178             _max_chunk_capacity,
 179             initial_chunk_capacity);
 180 
 181   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 182                 initial_chunk_capacity, _max_chunk_capacity);
 183 
 184   return resize(initial_chunk_capacity);
 185 }
 186 
 187 void G1CMMarkStack::expand() {
 188   if (_chunk_capacity == _max_chunk_capacity) {
 189     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 190     return;
 191   }
 192   size_t old_capacity = _chunk_capacity;
 193   // Double capacity if possible
 194   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);


 697     AbstractGangTask("G1 Clear Bitmap"),
 698     _cl(bitmap, suspendible ? cm : NULL),
 699     _hr_claimer(n_workers),
 700     _suspendible(suspendible)
 701   { }
 702 
 703   void work(uint worker_id) {
 704     SuspendibleThreadSetJoiner sts_join(_suspendible);
 705     G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer);
 706   }
 707 
 708   bool is_complete() {
 709     return _cl.complete();
 710   }
 711 };
 712 
 713 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 714   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 715 
 716   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 717   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 718 
 719   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 720 
 721   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 722 
 723   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 724   workers->run_task(&cl, num_workers);
 725   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 726 }
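
The sizing above is a ceiling division followed by a cap: the bitmap bytes to clear are rounded up to whole chunks, and the worker count can never exceed either the number of chunks or the gang's active workers. A worked sketch with assumed values (the region size, chunk size and heap_map_factor below are illustrative, not read from the VM):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t grain_bytes     = 1024 * 1024;  // assumed 1 MB regions
      const size_t num_regions     = 2048;
      const size_t heap_map_factor = 64;           // heap bytes per bitmap byte, assumed
      const size_t chunk_size      = 1024 * 1024;  // assumed clearing chunk
      const size_t active_workers  = 8;

      const size_t bytes_to_clear = grain_bytes * num_regions / heap_map_factor; // 32 MB
      const size_t num_chunks = (bytes_to_clear + chunk_size - 1) / chunk_size;  // 32 chunks
      const size_t num_workers = num_chunks < active_workers ? num_chunks
                                                             : active_workers;   // 8 workers
      printf("%zu chunks, %zu workers\n", num_chunks, num_workers);
      return 0;
    }
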
 727 
 728 void G1ConcurrentMark::cleanup_for_next_mark() {
 729   // Make sure that the concurrent mark thread looks to still be in
 730   // the current cycle.
 731   guarantee(cmThread()->during_cycle(), "invariant");
 732 
 733   // We are finishing up the current cycle by clearing the next
 734   // marking bitmap and getting it ready for the next cycle. During
 735   // this time no other cycle can start. So, let's make sure that this
 736   // is the case.
 737   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

