
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp


3202   for (int i = (int) num; i > 0; i--) {
3203     oop cur = ovflw_stk->pop();
3204     assert(cur != NULL, "Counted wrong?");
3205     work_q->push(cur);
3206   }
3207   return num > 0;
3208 }
3209 
3210 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3211   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3212   int n_tasks = pst->n_tasks();
3213   // We allow that there may be no tasks to do here because
3214   // we are restarting after a stack overflow.
3215   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3216   uint nth_task = 0;
3217 
3218   HeapWord* aligned_start = sp->bottom();
3219   if (sp->used_region().contains(_restart_addr)) {
3220     // Align down to a card boundary for the start of 0th task
3221     // for this space.
3222     aligned_start = align_ptr_down(_restart_addr, CardTableModRefBS::card_size);
3223   }
3224 
3225   size_t chunk_size = sp->marking_task_size();
3226   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3227     // Having claimed the nth task in this space,
3228     // compute the chunk that it corresponds to:
3229     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3230                                aligned_start + (nth_task+1)*chunk_size);
3231     // Try and bump the global finger via a CAS;
3232     // note that we need to do the global finger bump
3233     // _before_ taking the intersection below, because
3234     // the task corresponding to that region will be
3235     // deemed done even if the used_region() expands
3236     // because of allocation -- as it almost certainly will
3237     // during start-up while the threads yield in the
3238     // closure below.
3239     HeapWord* finger = span.end();
3240     bump_global_finger(finger);   // atomically
3241     // There are null tasks here corresponding to chunks
3242     // beyond the "top" address of the space.


5635 
5636 
5637 // CMS Bit Map Wrapper /////////////////////////////////////////
5638 
5639 // Construct a CMS bit map infrastructure, but don't create the
5640 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5641 // further below.
5642 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5643   _bm(),
5644   _shifter(shifter),
5645   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5646                                     Monitor::_safepoint_check_sometimes) : NULL)
5647 {
5648   _bmStartWord = 0;
5649   _bmWordSize  = 0;
5650 }
5651 
5652 bool CMSBitMap::allocate(MemRegion mr) {
5653   _bmStartWord = mr.start();
5654   _bmWordSize  = mr.word_size();
5655   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5656                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5657   if (!brs.is_reserved()) {
5658     log_warning(gc)("CMS bit map allocation failure");
5659     return false;
5660   }
5661   // For now we'll just commit all of the bit map up front.
5662   // Later on we'll try to be more parsimonious with swap.
5663   if (!_virtual_space.initialize(brs, brs.size())) {
5664     log_warning(gc)("CMS bit map backing store failure");
5665     return false;
5666   }
5667   assert(_virtual_space.committed_size() == brs.size(),
5668          "didn't reserve backing store for all of CMS bit map?");
5669   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5670          _bmWordSize, "inconsistency in bit map sizing");
5671   _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5672 
5673   // bm.clear(); // can we rely on getting zero'd memory? verify below
5674   assert(isAllClear(),
5675          "Expected zero'd memory from ReservedSpace constructor");


5726 void CMSBitMap::region_invariant(MemRegion mr)
5727 {
5728   assert_locked();
5729   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5730   assert(!mr.is_empty(), "unexpected empty region");
5731   assert(covers(mr), "mr should be covered by bit map");
5732   // convert address range into offset range
5733   size_t start_ofs = heapWordToOffset(mr.start());
5734   // Make sure that end() is appropriately aligned
5735   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5736                         (1 << (_shifter+LogHeapWordSize))),
5737          "Misaligned mr.end()");
5738   size_t end_ofs   = heapWordToOffset(mr.end());
5739   assert(end_ofs > start_ofs, "Should mark at least one bit");
5740 }
5741 
5742 #endif
5743 
5744 bool CMSMarkStack::allocate(size_t size) {
5745   // allocate a stack of the requisite depth
5746   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5747                    size * sizeof(oop)));
5748   if (!rs.is_reserved()) {
5749     log_warning(gc)("CMSMarkStack allocation failure");
5750     return false;
5751   }
5752   if (!_virtual_space.initialize(rs, rs.size())) {
5753     log_warning(gc)("CMSMarkStack backing store failure");
5754     return false;
5755   }
5756   assert(_virtual_space.committed_size() == rs.size(),
5757          "didn't reserve backing store for all of CMS stack?");
5758   _base = (oop*)(_virtual_space.low());
5759   _index = 0;
5760   _capacity = size;
5761   NOT_PRODUCT(_max_depth = 0);
5762   return true;
5763 }
5764 
5765 // XXX FIX ME !!! In the MT case we come in here holding a
5766 // leaf lock. For printing we need to take a further lock
5767 // which has lower rank. We need to recalibrate the two
5768 // lock-ranks involved in order to be able to print the
5769 // messages below. (Or defer the printing to the caller.
5770 // For now we take the expedient path of just disabling the
5771 // messages for the problematic case.)
5772 void CMSMarkStack::expand() {
5773   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5774   if (_capacity == MarkStackSizeMax) {
5775     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5776       // We print a warning message only once per CMS cycle.
5777       log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5778     }
5779     return;
5780   }
5781   // Double capacity if possible
5782   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5783   // Do not give up existing stack until we have managed to
5784   // get the double capacity that we desired.
5785   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5786                    new_capacity * sizeof(oop)));
5787   if (rs.is_reserved()) {
5788     // Release the backing store associated with old stack
5789     _virtual_space.release();
5790     // Reinitialize virtual space for new stack
5791     if (!_virtual_space.initialize(rs, rs.size())) {
5792       fatal("Not enough swap for expanded marking stack");
5793     }
5794     _base = (oop*)(_virtual_space.low());
5795     _index = 0;
5796     _capacity = new_capacity;
5797   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5798     // Failed to double capacity, continue;
5799     // we print a detail message only once per CMS cycle.
5800     log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5801                         _capacity / K, new_capacity / K);
5802   }
5803 }
5804 
5805 




3202   for (int i = (int) num; i > 0; i--) {
3203     oop cur = ovflw_stk->pop();
3204     assert(cur != NULL, "Counted wrong?");
3205     work_q->push(cur);
3206   }
3207   return num > 0;
3208 }
3209 
3210 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3211   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3212   int n_tasks = pst->n_tasks();
3213   // We allow that there may be no tasks to do here because
3214   // we are restarting after a stack overflow.
3215   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3216   uint nth_task = 0;
3217 
3218   HeapWord* aligned_start = sp->bottom();
3219   if (sp->used_region().contains(_restart_addr)) {
3220     // Align down to a card boundary for the start of 0th task
3221     // for this space.
3222     aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
3223   }
3224 
3225   size_t chunk_size = sp->marking_task_size();
3226   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3227     // Having claimed the nth task in this space,
3228     // compute the chunk that it corresponds to:
3229     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3230                                aligned_start + (nth_task+1)*chunk_size);
3231     // Try and bump the global finger via a CAS;
3232     // note that we need to do the global finger bump
3233     // _before_ taking the intersection below, because
3234     // the task corresponding to that region will be
3235     // deemed done even if the used_region() expands
3236     // because of allocation -- as it almost certainly will
3237     // during start-up while the threads yield in the
3238     // closure below.
3239     HeapWord* finger = span.end();
3240     bump_global_finger(finger);   // atomically
3241     // There are null tasks here corresponding to chunks
3242     // beyond the "top" address of the space.
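
The claiming loop above is the whole work-distribution scheme: the space is carved into fixed-size, card-aligned chunks, each worker claims chunk indices through the SequentialSubTasksDone counter, and the shared global finger is advanced monotonically with a CAS before the chunk is intersected with used_region(). A minimal standalone sketch of that pattern, using std::atomic rather than HotSpot's Atomic class; claim_next and bump_finger here are hypothetical stand-ins for is_task_claimed() and bump_global_finger(), not the real APIs:

#include <atomic>
#include <cstddef>

std::atomic<size_t> next_task{0};       // plays the role of SequentialSubTasksDone
std::atomic<char*>  global_finger{nullptr};

// Claim the next chunk index; false once all n_tasks chunks are taken.
bool claim_next(size_t n_tasks, size_t& nth_task) {
  nth_task = next_task.fetch_add(1, std::memory_order_relaxed);
  return nth_task < n_tasks;
}

// Advance the finger monotonically, as bump_global_finger() does:
// retry the CAS only while our address is beyond the current finger.
void bump_finger(char* new_finger) {
  char* cur = global_finger.load(std::memory_order_relaxed);
  while (new_finger > cur &&
         !global_finger.compare_exchange_weak(cur, new_finger)) {
    // compare_exchange_weak reloads cur on failure; the loop re-tests the bound.
  }
}

void scan_chunks(char* aligned_start, size_t chunk_size, size_t n_tasks) {
  size_t nth;
  while (claim_next(n_tasks, nth)) {
    char* lo = aligned_start + nth * chunk_size;
    char* hi = lo + chunk_size;
    bump_finger(hi);  // bump _before_ intersecting with the used region
    // ... intersect [lo, hi) with used_region() and scan the result ...
  }
}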


5635 
5636 
5637 // CMS Bit Map Wrapper /////////////////////////////////////////
5638 
5639 // Construct a CMS bit map infrastructure, but don't create the
5640 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5641 // further below.
5642 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5643   _bm(),
5644   _shifter(shifter),
5645   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5646                                     Monitor::_safepoint_check_sometimes) : NULL)
5647 {
5648   _bmStartWord = 0;
5649   _bmWordSize  = 0;
5650 }
5651 
5652 bool CMSBitMap::allocate(MemRegion mr) {
5653   _bmStartWord = mr.start();
5654   _bmWordSize  = mr.word_size();
5655   ReservedSpace brs(ReservedSpace::allocation_align_up(
5656                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5657   if (!brs.is_reserved()) {
5658     log_warning(gc)("CMS bit map allocation failure");
5659     return false;
5660   }
5661   // For now we'll just commit all of the bit map up front.
5662   // Later on we'll try to be more parsimonious with swap.
5663   if (!_virtual_space.initialize(brs, brs.size())) {
5664     log_warning(gc)("CMS bit map backing store failure");
5665     return false;
5666   }
5667   assert(_virtual_space.committed_size() == brs.size(),
5668          "didn't reserve backing store for all of CMS bit map?");
5669   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5670          _bmWordSize, "inconsistency in bit map sizing");
5671   _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5672 
5673   // bm.clear(); // can we rely on getting zero'd memory? verify below
5674   assert(isAllClear(),
5675          "Expected zero'd memory from ReservedSpace constructor");
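
The sizing arithmetic in allocate() is dense: one bit covers 2^_shifter heap words, so the map needs _bmWordSize >> _shifter bits, which is _bmWordSize >> (_shifter + LogBitsPerByte) bytes (LogBitsPerByte is 3), plus one byte of slack for the shift's truncation; the later assert simply runs the computation in reverse. A worked example under assumed constants (8-byte heap words, a 1 GiB covered region, _shifter == 0, i.e. one bit per word):

#include <cstdio>
#include <cstddef>

const int LogBitsPerByte  = 3;  // 8 bits per byte, as in HotSpot
const int LogHeapWordSize = 3;  // 8-byte heap words on a 64-bit VM

int main() {
  size_t heap_bytes   = size_t(1) << 30;                // 1 GiB region
  size_t bm_word_size = heap_bytes >> LogHeapWordSize;  // 2^27 heap words
  int    shifter      = 0;                              // one bit per word

  size_t bits  = bm_word_size >> shifter;                           // 2^27
  size_t bytes = (bm_word_size >> (shifter + LogBitsPerByte)) + 1;  // ~16 MiB
  printf("%zu bits -> %zu bytes\n", bits, bytes);

  // Mirror of the consistency assert above:
  // committed bytes, scaled back up, must cover every heap word.
  return (bytes << (shifter + LogBitsPerByte)) >= bm_word_size ? 0 : 1;
}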


5726 void CMSBitMap::region_invariant(MemRegion mr)
5727 {
5728   assert_locked();
5729   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5730   assert(!mr.is_empty(), "unexpected empty region");
5731   assert(covers(mr), "mr should be covered by bit map");
5732   // convert address range into offset range
5733   size_t start_ofs = heapWordToOffset(mr.start());
5734   // Make sure that end() is appropriately aligned
5735   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5736                         (1 << (_shifter+LogHeapWordSize))),
5737          "Misaligned mr.end()");
5738   size_t end_ofs   = heapWordToOffset(mr.end());
5739   assert(end_ofs > start_ofs, "Should mark at least one bit");
5740 }
5741 
5742 #endif
5743 
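region_invariant() above relies on the address-to-bit mapping being exact: a heap address maps to bit ((addr - _bmStartWord) in heap words) >> _shifter, so mr.end() must sit on a (1 << (_shifter + LogHeapWordSize))-byte boundary or its end bit would be computed by truncation. A tiny sketch of that mapping and the alignment check, on raw byte addresses with assumed 8-byte heap words:

#include <cstdint>
#include <cstddef>

const int LogHeapWordSize = 3;  // 8-byte heap words, 64-bit VM

// heapWordToOffset() equivalent on byte addresses: the word index
// relative to the map start, scaled down by the shifter.
size_t heap_word_to_offset(uintptr_t addr, uintptr_t start, int shifter) {
  return ((addr - start) >> LogHeapWordSize) >> shifter;
}

// The alignment demanded of mr.end() above: a multiple of the
// heap-word span covered by one bit.
bool end_is_aligned(uintptr_t end, int shifter) {
  size_t granule = size_t(1) << (shifter + LogHeapWordSize);
  return (end & (granule - 1)) == 0;
}
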
5744 bool CMSMarkStack::allocate(size_t size) {
5745   // allocate a stack of the requisite depth
5746   ReservedSpace rs(ReservedSpace::allocation_align_up(
5747                    size * sizeof(oop)));
5748   if (!rs.is_reserved()) {
5749     log_warning(gc)("CMSMarkStack allocation failure");
5750     return false;
5751   }
5752   if (!_virtual_space.initialize(rs, rs.size())) {
5753     log_warning(gc)("CMSMarkStack backing store failure");
5754     return false;
5755   }
5756   assert(_virtual_space.committed_size() == rs.size(),
5757          "didn't reserve backing store for all of CMS stack?");
5758   _base = (oop*)(_virtual_space.low());
5759   _index = 0;
5760   _capacity = size;
5761   NOT_PRODUCT(_max_depth = 0);
5762   return true;
5763 }
5764 
5765 // XXX FIX ME !!! In the MT case we come in here holding a
5766 // leaf lock. For printing we need to take a further lock
5767 // which has lower rank. We need to recalibrate the two
5768 // lock-ranks involved in order to be able to print the
5769 // messages below. (Or defer the printing to the caller.
5770 // For now we take the expedient path of just disabling the
5771 // messages for the problematic case.)
5772 void CMSMarkStack::expand() {
5773   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5774   if (_capacity == MarkStackSizeMax) {
5775     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5776       // We print a warning message only once per CMS cycle.
5777       log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5778     }
5779     return;
5780   }
5781   // Double capacity if possible
5782   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5783   // Do not give up existing stack until we have managed to
5784   // get the double capacity that we desired.
5785   ReservedSpace rs(ReservedSpace::allocation_align_up(
5786                    new_capacity * sizeof(oop)));
5787   if (rs.is_reserved()) {
5788     // Release the backing store associated with old stack
5789     _virtual_space.release();
5790     // Reinitialize virtual space for new stack
5791     if (!_virtual_space.initialize(rs, rs.size())) {
5792       fatal("Not enough swap for expanded marking stack");
5793     }
5794     _base = (oop*)(_virtual_space.low());
5795     _index = 0;
5796     _capacity = new_capacity;
5797   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5798     // Failed to double capacity, continue;
5799     // we print a detail message only once per CMS cycle.
5800     log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5801                         _capacity / K, new_capacity / K);
5802   }
5803 }
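
expand() follows a careful grow-by-doubling policy: cap the capacity at MarkStackSizeMax, reserve the new backing store before releasing the old one so a failed reservation leaves the existing stack intact, and count each failure mode so the benign message fires only once per cycle. A minimal sketch of the same policy with malloc standing in for ReservedSpace/VirtualSpace (like the code above, it resets the index rather than copying the old contents):

#include <cstdio>
#include <cstdlib>

const size_t MarkStackSizeMax = size_t(1) << 22;  // hypothetical cap

struct MarkStack {
  void** base = nullptr;
  size_t index = 0, capacity = 0;
  size_t hit_limit = 0, failed_double = 0;
};

void expand(MarkStack& s) {
  if (s.capacity == MarkStackSizeMax) {
    if (s.hit_limit++ == 0) {
      fprintf(stderr, "(benign) hit mark stack size limit\n");
    }
    return;                      // already at the hard cap
  }
  size_t new_capacity = s.capacity * 2 < MarkStackSizeMax
                      ? s.capacity * 2 : MarkStackSizeMax;
  void** new_base = (void**)calloc(new_capacity, sizeof(void*));
  if (new_base != nullptr) {
    free(s.base);                // release the old store only after the
    s.base     = new_base;       // new reservation has succeeded
    s.index    = 0;              // reset, as the code above does
    s.capacity = new_capacity;
  } else if (s.failed_double++ == 0) {
    fprintf(stderr, "(benign) failed to expand mark stack\n");
  }
}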
5804 
5805 

