rev 3708 : 8000244: G1: Ergonomically set MarkStackSize and use virtual space for global marking stack
Summary: Set MarkStackSize based on the number of parallel marking threads, with a reasonable minimum. Expand the marking stack, up to a reasonable maximum, if we have to restart marking due to an overflow. Allocate the underlying space for the marking stack from virtual memory.
Reviewed-by: jmasa
rev 3709 : imported patch reuse-old-marking-stack
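
The ergonomics described in the summary reduce to one clamped computation (see the FLAG_IS_DEFAULT(MarkStackSize) branch added to the ConcurrentMark constructor below). A standalone sketch of that sizing rule, with illustrative stand-in values for the HotSpot globals (the concrete constants here are assumptions, not the VM's defaults):

```cpp
#include <algorithm>
#include <cstdint>

// Stand-ins for the HotSpot globals involved; the values are illustrative.
typedef uintptr_t uintx;
const uintx TASKQUEUE_SIZE   = 16384;            // one marking task queue (assumed)
const uintx MarkStackSize    = 32 * 1024;        // flag default (assumed)
const uintx MarkStackSizeMax = 4 * 1024 * 1024;  // flag maximum (assumed)

// When MarkStackSize is left at its default, size the global mark stack
// from the number of marking threads, clamped to a reasonable range.
uintx ergonomic_mark_stack_size(uintx parallel_marking_threads) {
  return std::min(MarkStackSizeMax,
                  std::max(MarkStackSize,
                           parallel_marking_threads * TASKQUEUE_SIZE));
}
```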

          --- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp
          +++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp
... 38 lines elided ...
  39   39  #include "memory/genOopClosures.inline.hpp"
  40   40  #include "memory/referencePolicy.hpp"
  41   41  #include "memory/resourceArea.hpp"
  42   42  #include "oops/oop.inline.hpp"
  43   43  #include "runtime/handles.inline.hpp"
  44   44  #include "runtime/java.hpp"
  45   45  #include "services/memTracker.hpp"
  46   46  
  47   47  // Concurrent marking bit map wrapper
  48   48  
  49      -CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
  50      -  _bm((uintptr_t*)NULL,0),
       49 +CMBitMapRO::CMBitMapRO(int shifter) :
       50 +  _bm(),
  51   51    _shifter(shifter) {
  52      -  _bmStartWord = (HeapWord*)(rs.base());
  53      -  _bmWordSize  = rs.size()/HeapWordSize;    // rs.size() is in bytes
  54      -  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
  55      -                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  56      -
  57      -  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  58      -
  59      -  guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
  60      -  // For now we'll just commit all of the bit map up fromt.
  61      -  // Later on we'll try to be more parsimonious with swap.
  62      -  guarantee(_virtual_space.initialize(brs, brs.size()),
  63      -            "couldn't reseve backing store for concurrent marking bit map");
  64      -  assert(_virtual_space.committed_size() == brs.size(),
  65      -         "didn't reserve backing store for all of concurrent marking bit map?");
  66      -  _bm.set_map((uintptr_t*)_virtual_space.low());
  67      -  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
  68      -         _bmWordSize, "inconsistency in bit map sizing");
  69      -  _bm.set_size(_bmWordSize >> _shifter);
       52 +  _bmStartWord = 0;
       53 +  _bmWordSize = 0;
  70   54  }
  71   55  
  72   56  HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  73   57                                                 HeapWord* limit) const {
  74   58    // First we must round addr *up* to a possible object boundary.
  75   59    addr = (HeapWord*)align_size_up((intptr_t)addr,
  76   60                                    HeapWordSize << _shifter);
  77   61    size_t addrOffset = heapWordToOffset(addr);
  78   62    if (limit == NULL) {
  79   63      limit = _bmStartWord + _bmWordSize;
... 21 lines elided ...
 101   85           "get_next_one postcondition");
 102   86    return nextAddr;
 103   87  }
 104   88  
 105   89  int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 106   90    assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 107   91    return (int) (diff >> _shifter);
 108   92  }
 109   93  
 110   94  #ifndef PRODUCT
 111      -bool CMBitMapRO::covers(ReservedSpace rs) const {
       95 +bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
 112   96    // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 113   97    assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 114   98           "size inconsistency");
 115      -  return _bmStartWord == (HeapWord*)(rs.base()) &&
 116      -         _bmWordSize  == rs.size()>>LogHeapWordSize;
       99 +  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
      100 +         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
 117  101  }
 118  102  #endif
 119  103  
      104 +bool CMBitMap::allocate(ReservedSpace heap_rs) {
      105 +  _bmStartWord = (HeapWord*)(heap_rs.base());
      106 +  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
      107 +  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
      108 +                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
      109 +  if (!brs.is_reserved()) {
      110 +    warning("ConcurrentMark marking bit map allocation failure");
      111 +    return false;
      112 +  }
      113 +  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
      114 +  // For now we'll just commit all of the bit map up front.
      115 +  // Later on we'll try to be more parsimonious with swap.
      116 +  if (!_virtual_space.initialize(brs, brs.size())) {
      117 +    warning("ConcurrentMark marking bit map backing store failure");
      118 +    return false;
      119 +  }
      120 +  assert(_virtual_space.committed_size() == brs.size(),
      121 +         "didn't reserve backing store for all of concurrent marking bit map?");
      122 +  _bm.set_map((uintptr_t*)_virtual_space.low());
      123 +  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
      124 +         _bmWordSize, "inconsistency in bit map sizing");
      125 +  _bm.set_size(_bmWordSize >> _shifter);
      126 +  return true;
      127 +}
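
For scale, CMBitMap::allocate() reserves one bit per (HeapWordSize << _shifter) bytes of heap. A minimal standalone model of the sizing arithmetic, assuming 8-byte heap words:

```cpp
#include <cstddef>

// Bytes of bit map needed to cover heap_bytes of heap: one bit per heap
// word shifted by 'shifter', plus one byte of rounding slack, mirroring
// the (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 expression above.
size_t bitmap_bytes(size_t heap_bytes, int shifter) {
  const size_t heap_word_size = 8;  // assumption: 64-bit heap words
  const int log_bits_per_byte = 3;  // 8 bits per byte
  size_t heap_words = heap_bytes / heap_word_size;
  return (heap_words >> (shifter + log_bits_per_byte)) + 1;
}

// Example: a 1 GiB heap with shifter == 0 needs about 16 MiB of bit map.
```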
      128 +
 120  129  void CMBitMap::clearAll() {
 121  130    _bm.clear();
 122  131    return;
 123  132  }
 124  133  
 125  134  void CMBitMap::markRange(MemRegion mr) {
 126  135    mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 127  136    assert(!mr.is_empty(), "unexpected empty region");
 128  137    assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 129  138            ((HeapWord *) mr.end())),
... 26 lines elided ...
 156  165  }
 157  166  
 158  167  CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 159  168    _base(NULL), _cm(cm)
 160  169  #ifdef ASSERT
 161  170    , _drain_in_progress(false)
 162  171    , _drain_in_progress_yields(false)
 163  172  #endif
 164  173  {}
 165  174  
 166      -void CMMarkStack::allocate(size_t size) {
 167      -  _base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
 168      -  if (_base == NULL) {
 169      -    vm_exit_during_initialization("Failed to allocate CM region mark stack");
      175 +bool CMMarkStack::allocate(size_t capacity) {
      176 +  // allocate a stack of the requisite depth
      177 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
      178 +  if (!rs.is_reserved()) {
      179 +    warning("ConcurrentMark MarkStack allocation failure");
      180 +    return false;
 170  181    }
 171      -  _index = 0;
 172      -  _capacity = (jint) size;
      182 +  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
      183 +  if (!_virtual_space.initialize(rs, rs.size())) {
      184 +    warning("ConcurrentMark MarkStack backing store failure");
      185 +    // Release the virtual memory reserved for the marking stack
      186 +    rs.release();
      187 +    return false;
      188 +  }
      189 +  assert(_virtual_space.committed_size() == rs.size(),
      190 +         "Didn't reserve backing store for all of ConcurrentMark stack?");
      191 +  _rs = rs;
      192 +  _base = (oop*) _virtual_space.low();
      193 +  setEmpty();
      194 +  _capacity = (jint) capacity;
 173  195    _saved_index = -1;
 174  196    NOT_PRODUCT(_max_depth = 0);
      197 +  return true;
      198 +}
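
CMMarkStack::allocate() now reserves address space and then commits it, warning and returning false on failure instead of exiting the VM. A rough POSIX-only model of that reserve-then-commit shape, with mmap standing in for ReservedSpace and mprotect for VirtualSpace::initialize():

```cpp
#include <cstddef>
#include <cstdio>
#include <sys/mman.h>

// Reserve, then commit; on failure, warn and report false to the caller.
bool allocate_stack(void** base, size_t capacity_bytes) {
  void* rs = mmap(NULL, capacity_bytes, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // reserve only
  if (rs == MAP_FAILED) {
    std::fprintf(stderr, "marking stack reservation failure\n");
    return false;
  }
  if (mprotect(rs, capacity_bytes, PROT_READ | PROT_WRITE) != 0) {
    std::fprintf(stderr, "marking stack backing store failure\n");
    munmap(rs, capacity_bytes);                         // release reservation
    return false;
  }
  *base = rs;
  return true;
}
```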
      199 +
      200 +void CMMarkStack::expand() {
       201 +  // Called during remark if we've overflown the marking stack during marking.
       202 +  assert(isEmpty(), "stack should have been emptied while handling overflow");
      203 +  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
      204 +  // Clear expansion flag
      205 +  _should_expand = false;
      206 +  if (_capacity == (jint) MarkStackSizeMax) {
      207 +    if (PrintGCDetails && Verbose) {
      208 +      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
      209 +    }
      210 +    return;
      211 +  }
      212 +  // Double capacity if possible
      213 +  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
      214 +  // Do not give up existing stack until we have managed to
      215 +  // get the double capacity that we desired.
      216 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
      217 +                                                           sizeof(oop)));
      218 +  if (!rs.is_reserved()) {
      219 +    if (PrintGCDetails && Verbose) {
       220 +      // Failed to double the capacity; continue with the current stack.
      221 +      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
      222 +                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
      223 +                          _capacity / K, new_capacity / K);
      224 +    }
      225 +    return;
      226 +  }
      227 +
      228 +  // Clear the backing store fields associated with the space for the
       229 +  // old marking stack. Note this doesn't actually release the space.
      230 +  _virtual_space.release();
      231 +
      232 +  // Reinitialize virtual space for the expanded stack.
      233 +  if (!_virtual_space.initialize(rs, rs.size())) {
       234 +    // We failed to commit the space for the expanded marking stack.
      235 +    // Release the expanded reserved space...
      236 +    rs.release();
      237 +    // ... and reinitialize with the previous un-expanded space.
      238 +    if (_virtual_space.initialize(_rs, _rs.size())) {
      239 +      if (PrintGCDetails && Verbose) {
      240 +        gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
      241 +                            SIZE_FORMAT "K to " SIZE_FORMAT "K",
      242 +                            _capacity / K, new_capacity / K);
      243 +      }
      244 +    } else {
      245 +      // The previous backing store space should have been already
      246 +      // committed but we failed to initialize the virtual space
      247 +      // for some reason.
      248 +      fatal("Error re-initializing marking stack with old capacity");
      249 +    }
      250 +  } else {
      251 +    // We successfully committed the space for the expanded marking stack.
      252 +    if (PrintGCDetails && Verbose) {
      253 +      gclog_or_tty->print(" Successfully expanded marking stack capacity from "
      254 +                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
      255 +                          _capacity / K, new_capacity / K);
      256 +    }
      257 +    // Release the previous (unexpanded) space.
      258 +    _rs.release();
      259 +    // Record the new (expanded) space.
      260 +    _rs = rs;
      261 +    // Record the new capacity
      262 +    _capacity = new_capacity;
      263 +  }
      264 +  assert(_virtual_space.committed_size() == _rs.size(),
      265 +         "Didn't reserve backing store for all of ConcurrentMark stack?");
      266 +  _base = (oop*)(_virtual_space.low());
      267 +  _index = 0;
      268 +}
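
Note that expand() never releases the old backing store before the new one is committed, and it never copies contents: the isEmpty() assert guarantees there is nothing to copy. The same shape condensed into a sketch, with malloc/free standing in for the VM's reserve/commit pair:

```cpp
#include <algorithm>
#include <cstdlib>

// Double the capacity, keeping the old space if the new one can't be had.
// No data is copied: expansion only happens while the stack is empty.
bool try_expand(void** base, size_t* capacity, size_t max_capacity) {
  if (*capacity == max_capacity) {
    return false;                       // benign: already at the limit
  }
  size_t new_capacity = std::min(*capacity * 2, max_capacity);
  void* new_base = std::malloc(new_capacity);
  if (new_base == NULL) {
    return false;                       // benign: keep the old space
  }
  std::free(*base);                     // only now release the old space
  *base = new_base;
  *capacity = new_capacity;
  return true;
}
```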
      269 +
      270 +void CMMarkStack::set_should_expand() {
       271 +  // If we're resetting the marking state because of a
      272 +  // marking stack overflow, record that we should, if
      273 +  // possible, expand the stack.
      274 +  _should_expand = _cm->has_overflown();
 175  275  }
 176  276  
 177  277  CMMarkStack::~CMMarkStack() {
 178  278    if (_base != NULL) {
 179      -    FREE_C_HEAP_ARRAY(oop, _base, mtGC);
      279 +    _base = NULL;
      280 +    _virtual_space.release();
 180  281    }
 181  282  }
 182  283  
 183  284  void CMMarkStack::par_push(oop ptr) {
 184  285    while (true) {
 185  286      if (isFull()) {
 186  287        _overflow = true;
 187  288        return;
 188  289      }
 189  290      // Otherwise...
... 20 lines elided ...
 210  311      // Otherwise...
 211  312      jint index = _index;
 212  313      jint next_index = index + n;
 213  314      if (next_index > _capacity) {
 214  315        _overflow = true;
 215  316        return;
 216  317      }
 217  318      jint res = Atomic::cmpxchg(next_index, &_index, index);
 218  319      if (res == index) {
 219  320        for (int i = 0; i < n; i++) {
 220      -        int ind = index + i;
      321 +        int  ind = index + i;
 221  322          assert(ind < _capacity, "By overflow test above.");
 222  323          _base[ind] = ptr_arr[i];
 223  324        }
 224  325        NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 225  326        return;
 226  327      }
 227  328      // Otherwise, we need to try again.
 228  329    }
 229  330  }
 230  331  
 231      -
 232  332  void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 233  333    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 234  334    jint start = _index;
 235  335    jint next_index = start + n;
 236  336    if (next_index > _capacity) {
 237  337      _overflow = true;
 238  338      return;
 239  339    }
 240  340    // Otherwise.
 241  341    _index = next_index;
 242  342    for (int i = 0; i < n; i++) {
 243  343      int ind = start + i;
 244  344      assert(ind < _capacity, "By overflow test above.");
 245  345      _base[ind] = ptr_arr[i];
 246  346    }
      347 +  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 247  348  }
 248  349  
 249      -
 250  350  bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 251  351    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 252  352    jint index = _index;
 253  353    if (index == 0) {
 254  354      *n = 0;
 255  355      return false;
 256  356    } else {
 257  357      int k = MIN2(max, index);
 258      -    jint new_ind = index - k;
      358 +    jint  new_ind = index - k;
 259  359      for (int j = 0; j < k; j++) {
 260  360        ptr_arr[j] = _base[new_ind + j];
 261  361      }
 262  362      _index = new_ind;
 263  363      *n = k;
 264  364      return true;
 265  365    }
 266  366  }
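
The unlocked push path above claims a slot range with Atomic::cmpxchg on _index and retries on contention, while par_push_arr() and par_pop_arr() coordinate through ParGCRareEvent_lock. A standalone model of the CAS-claim pattern using std::atomic; like the VM code, it assumes pushes and pops are separated by higher-level marking phases:

```cpp
#include <atomic>

template <typename T, int CAPACITY>
struct ParStackModel {
  T buf[CAPACITY];
  std::atomic<int> index{0};
  bool overflow = false;

  // Claim [idx, idx + n) by CAS and fill it; losers of the race retry.
  // Overflow is latched rather than blocking, as in CMMarkStack.
  void par_push_n(const T* arr, int n) {
    for (;;) {
      int idx = index.load();
      int next = idx + n;
      if (next > CAPACITY) {
        overflow = true;
        return;
      }
      if (index.compare_exchange_strong(idx, next)) {
        for (int i = 0; i < n; i++) {
          buf[idx + i] = arr[i];
        }
        return;
      }
      // CAS failed: another thread advanced the index; try again.
    }
  }
};
```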
 267  367  
 268  368  template<class OopClosureClass>
... 128 lines elided ...
 397  497  }
 398  498  
 399  499  #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 400  500  #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 401  501  #endif // _MSC_VER
 402  502  
 403  503  uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 404  504    return MAX2((n_par_threads + 2) / 4, 1U);
 405  505  }
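
scale_parallel_threads() yields roughly one marking thread per four parallel GC threads (the +2 rounds to nearest), with a floor of one. A quick self-check of the formula:

```cpp
#include <cassert>

unsigned scale_marking_threads(unsigned n_par_threads) {
  unsigned scaled = (n_par_threads + 2) / 4;  // round to nearest quarter
  return scaled > 1 ? scaled : 1;             // never fewer than one
}

int main() {
  assert(scale_marking_threads(1)  == 1);     // floor applies
  assert(scale_marking_threads(4)  == 1);
  assert(scale_marking_threads(8)  == 2);
  assert(scale_marking_threads(16) == 4);
  return 0;
}
```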
 406  506  
 407      -ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 408      -  _markBitMap1(rs, MinObjAlignment - 1),
 409      -  _markBitMap2(rs, MinObjAlignment - 1),
      507 +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
      508 +  _g1h(g1h),
      509 +  _markBitMap1(MinObjAlignment - 1),
      510 +  _markBitMap2(MinObjAlignment - 1),
 410  511  
 411  512    _parallel_marking_threads(0),
 412  513    _max_parallel_marking_threads(0),
 413  514    _sleep_factor(0.0),
 414  515    _marking_task_overhead(1.0),
 415  516    _cleanup_sleep_factor(0.0),
 416  517    _cleanup_task_overhead(1.0),
 417  518    _cleanup_list("Cleanup List"),
 418      -  _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
 419      -  _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
 420      -           CardTableModRefBS::card_shift,
 421      -           false /* in_resource_area*/),
      519 +  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
      520 +  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
      521 +            CardTableModRefBS::card_shift,
      522 +            false /* in_resource_area*/),
 422  523  
 423  524    _prevMarkBitMap(&_markBitMap1),
 424  525    _nextMarkBitMap(&_markBitMap2),
 425  526  
 426  527    _markStack(this),
 427  528    // _finger set in set_non_marking_state
 428  529  
 429  530    _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 430  531    // _active_tasks set in set_non_marking_state
 431  532    // _tasks set inside the constructor
... 10 lines elided ...
 442  543  
 443  544    _init_times(),
 444  545    _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 445  546    _cleanup_times(),
 446  547    _total_counting_time(0.0),
 447  548    _total_rs_scrub_time(0.0),
 448  549  
 449  550    _parallel_workers(NULL),
 450  551  
 451  552    _count_card_bitmaps(NULL),
 452      -  _count_marked_bytes(NULL) {
      553 +  _count_marked_bytes(NULL),
      554 +  _completed_initialization(false) {
 453  555    CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 454  556    if (verbose_level < no_verbose) {
 455  557      verbose_level = no_verbose;
 456  558    }
 457  559    if (verbose_level > high_verbose) {
 458  560      verbose_level = high_verbose;
 459  561    }
 460  562    _verbose_level = verbose_level;
 461  563  
 462  564    if (verbose_low()) {
 463  565      gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 464  566                             "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 465  567    }
 466  568  
 467      -  _markStack.allocate(MarkStackSize);
      569 +  if (!_markBitMap1.allocate(heap_rs)) {
      570 +    warning("Failed to allocate first CM bit map");
      571 +    return;
      572 +  }
      573 +  if (!_markBitMap2.allocate(heap_rs)) {
      574 +    warning("Failed to allocate second CM bit map");
      575 +    return;
      576 +  }
 468  577  
 469  578    // Create & start a ConcurrentMark thread.
 470  579    _cmThread = new ConcurrentMarkThread(this);
 471  580    assert(cmThread() != NULL, "CM Thread should have been created");
 472  581    assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 473  582  
 474      -  _g1h = G1CollectedHeap::heap();
 475  583    assert(CGC_lock != NULL, "Where's the CGC_lock?");
 476      -  assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
 477      -  assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
      584 +  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
      585 +  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
 478  586  
 479  587    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 480  588    satb_qs.set_buffer_size(G1SATBBufferSize);
 481  589  
 482  590    _root_regions.init(_g1h, this);
 483  591  
 484      -  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 485      -  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 486      -
 487      -  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 488      -  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 489      -
 490      -  BitMap::idx_t card_bm_size = _card_bm.size();
 491      -
 492      -  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 493      -  _active_tasks = _max_worker_id;
 494      -  for (uint i = 0; i < _max_worker_id; ++i) {
 495      -    CMTaskQueue* task_queue = new CMTaskQueue();
 496      -    task_queue->initialize();
 497      -    _task_queues->register_queue(i, task_queue);
 498      -
 499      -    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 500      -    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
 501      -
 502      -    _tasks[i] = new CMTask(i, this,
 503      -                           _count_marked_bytes[i],
 504      -                           &_count_card_bitmaps[i],
 505      -                           task_queue, _task_queues);
 506      -
 507      -    _accum_task_vtime[i] = 0.0;
 508      -  }
 509      -
 510      -  // Calculate the card number for the bottom of the heap. Used
 511      -  // in biasing indexes into the accounting card bitmaps.
 512      -  _heap_bottom_card_num =
 513      -    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 514      -                                CardTableModRefBS::card_shift);
 515      -
 516      -  // Clear all the liveness counting data
 517      -  clear_all_count_data();
 518      -
 519  592    if (ConcGCThreads > ParallelGCThreads) {
 520      -    vm_exit_during_initialization("Can't have more ConcGCThreads "
 521      -                                  "than ParallelGCThreads.");
       593 +    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
       594 +            "than ParallelGCThreads (" UINTX_FORMAT ").",
      595 +            ConcGCThreads, ParallelGCThreads);
      596 +    return;
 522  597    }
 523  598    if (ParallelGCThreads == 0) {
 524  599      // if we are not running with any parallel GC threads we will not
 525  600      // spawn any marking threads either
 526  601      _parallel_marking_threads =       0;
 527  602      _max_parallel_marking_threads =   0;
 528  603      _sleep_factor             =     0.0;
 529  604      _marking_task_overhead    =     1.0;
 530  605    } else {
 531  606      if (ConcGCThreads > 0) {
... 51 lines elided ...
 583  658      guarantee(parallel_marking_threads() > 0, "peace of mind");
 584  659      _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 585  660           _max_parallel_marking_threads, false, true);
 586  661      if (_parallel_workers == NULL) {
 587  662        vm_exit_during_initialization("Failed necessary allocation.");
 588  663      } else {
 589  664        _parallel_workers->initialize_workers();
 590  665      }
 591  666    }
 592  667  
      668 +  if (FLAG_IS_DEFAULT(MarkStackSize)) {
      669 +    uintx mark_stack_size =
      670 +      MIN2(MarkStackSizeMax,
      671 +          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
      672 +    // Verify that the calculated value for MarkStackSize is in range.
      673 +    // It would be nice to use the private utility routine from Arguments.
      674 +    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      675 +      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
      676 +              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
       677 +              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      678 +      return;
      679 +    }
      680 +    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
      681 +  } else {
      682 +    // Verify MarkStackSize is in range.
      683 +    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      684 +      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
      685 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
      686 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
      687 +                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
       688 +                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
      689 +          return;
      690 +        }
      691 +      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
      692 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
      693 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
      694 +                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
      695 +                  MarkStackSize, MarkStackSizeMax);
      696 +          return;
      697 +        }
      698 +      }
      699 +    }
      700 +  }
      701 +
      702 +  if (!_markStack.allocate(MarkStackSize)) {
      703 +    warning("Failed to allocate CM marking stack");
      704 +    return;
      705 +  }
      706 +
      707 +  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
      708 +  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
      709 +
      710 +  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
      711 +  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
      712 +
      713 +  BitMap::idx_t card_bm_size = _card_bm.size();
      714 +
      715 +  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
      716 +  _active_tasks = _max_worker_id;
      717 +
      718 +  size_t max_regions = (size_t) _g1h->max_regions();
      719 +  for (uint i = 0; i < _max_worker_id; ++i) {
      720 +    CMTaskQueue* task_queue = new CMTaskQueue();
      721 +    task_queue->initialize();
      722 +    _task_queues->register_queue(i, task_queue);
      723 +
      724 +    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
      725 +    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
      726 +
      727 +    _tasks[i] = new CMTask(i, this,
      728 +                           _count_marked_bytes[i],
      729 +                           &_count_card_bitmaps[i],
      730 +                           task_queue, _task_queues);
      731 +
      732 +    _accum_task_vtime[i] = 0.0;
      733 +  }
      734 +
      735 +  // Calculate the card number for the bottom of the heap. Used
      736 +  // in biasing indexes into the accounting card bitmaps.
      737 +  _heap_bottom_card_num =
      738 +    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
      739 +                                CardTableModRefBS::card_shift);
      740 +
      741 +  // Clear all the liveness counting data
      742 +  clear_all_count_data();
      743 +
 593  744    // so that the call below can read a sensible value
 594      -  _heap_start = (HeapWord*) rs.base();
      745 +  _heap_start = (HeapWord*) heap_rs.base();
 595  746    set_non_marking_state();
      747 +  _completed_initialization = true;
 596  748  }
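
Since the constructor now warns and returns early on allocation failure rather than calling vm_exit_during_initialization(), success is latched in _completed_initialization and the abort decision moves to the caller. A generic sketch of that two-phase pattern; the completed_initialization() accessor name is assumed from this changeset's header changes, which are not shown in this file:

```cpp
#include <cstdio>

// Two-phase initialization: the constructor records success in a flag;
// the caller checks the flag and decides whether failure is fatal.
class Subsystem {
  bool _completed_initialization;
public:
  Subsystem() : _completed_initialization(false) {
    if (!allocate_resources()) {
      return;                     // leave the flag false; caller decides
    }
    _completed_initialization = true;
  }
  bool completed_initialization() const { return _completed_initialization; }
private:
  bool allocate_resources() { return true; }  // placeholder for real work
};

int main() {
  Subsystem s;
  if (!s.completed_initialization()) {
    std::fprintf(stderr, "initialization failed, aborting startup\n");
    return 1;
  }
  return 0;
}
```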
 597  749  
 598  750  void ConcurrentMark::update_g1_committed(bool force) {
 599  751    // If concurrent marking is not in progress, then we do not need to
 600  752    // update _heap_end.
 601  753    if (!concurrent_marking_in_progress() && !force) return;
 602  754  
 603  755    MemRegion committed = _g1h->g1_committed();
 604  756    assert(committed.start() == _heap_start, "start shouldn't change");
 605  757    HeapWord* new_end = committed.end();
... 552 lines elided ...
1158 1310      if (VerifyDuringGC) {
1159 1311        HandleMark hm;  // handle scope
1160 1312        gclog_or_tty->print(" VerifyDuringGC:(after)");
1161 1313        Universe::heap()->prepare_for_verify();
1162 1314        Universe::verify(/* silent      */ false,
1163 1315                         /* option      */ VerifyOption_G1UseNextMarking);
1164 1316      }
1165 1317      assert(!restart_for_overflow(), "sanity");
1166 1318    }
1167 1319  
     1320 +  // Expand the marking stack, if we have to and if we can.
     1321 +  if (_markStack.should_expand()) {
     1322 +    _markStack.expand();
     1323 +  }
     1324 +
1168 1325    // Reset the marking state if marking completed
1169 1326    if (!restart_for_overflow()) {
1170 1327      set_non_marking_state();
1171 1328    }
1172 1329  
1173 1330  #if VERIFY_OBJS_PROCESSED
1174 1331    _scan_obj_cl.objs_processed = 0;
1175 1332    ThreadLocalObjQueue::objs_enqueued = 0;
1176 1333  #endif
1177 1334  
... 1600 lines elided ...
2778 2935    VerifyNoCSetOopsClosure cl;
2779 2936  
2780 2937    if (verify_stacks) {
2781 2938      // Verify entries on the global mark stack
2782 2939      cl.set_phase(VerifyNoCSetOopsStack);
2783 2940      _markStack.oops_do(&cl);
2784 2941  
2785 2942      // Verify entries on the task queues
2786 2943      for (uint i = 0; i < _max_worker_id; i += 1) {
2787 2944        cl.set_phase(VerifyNoCSetOopsQueues, i);
2788      -      OopTaskQueue* queue = _task_queues->queue(i);
     2945 +      CMTaskQueue* queue = _task_queues->queue(i);
2789 2946        queue->oops_do(&cl);
2790 2947      }
2791 2948    }
2792 2949  
2793 2950    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2794 2951  
2795 2952    // Verify entries on the enqueued SATB buffers
2796 2953    if (verify_enqueued_buffers) {
2797 2954      cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2798 2955      satb_qs.iterate_completed_buffers_read_only(&cl);
... 34 lines elided ...
2833 2990                    !task_hr->in_collection_set(),
2834 2991                    err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2835 2992                            task_finger, HR_FORMAT_PARAMS(task_hr)));
2836 2993        }
2837 2994      }
2838 2995    }
2839 2996  }
2840 2997  #endif // PRODUCT
2841 2998  
2842 2999  void ConcurrentMark::clear_marking_state(bool clear_overflow) {
2843      -  _markStack.setEmpty();
2844      -  _markStack.clear_overflow();
     3000 +  _markStack.set_should_expand();
     3001 +  _markStack.setEmpty();        // Also clears the _markStack overflow flag
2845 3002    if (clear_overflow) {
2846 3003      clear_has_overflown();
2847 3004    } else {
2848 3005      assert(has_overflown(), "pre-condition");
2849 3006    }
2850 3007    _finger = _heap_start;
2851 3008  
2852 3009    for (uint i = 0; i < _max_worker_id; ++i) {
2853      -    OopTaskQueue* queue = _task_queues->queue(i);
     3010 +    CMTaskQueue* queue = _task_queues->queue(i);
2854 3011      queue->set_empty();
2855 3012    }
2856 3013  }
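
Putting the pieces together: an overflow latched during a marking pass makes clear_marking_state() request expansion via set_should_expand(), marking restarts with an emptied stack, and the stack is actually grown exactly once, at remark (the should_expand()/expand() hook earlier in this change). A compressed model of that control flow:

```cpp
#include <cstdio>

struct StackModel {
  int  capacity      = 4;
  int  max_capacity  = 16;
  bool should_expand = false;
  void expand() {                      // only ever invoked at remark
    if (capacity < max_capacity) {
      capacity *= 2;
    }
    should_expand = false;
  }
};

int main() {
  StackModel stack;
  bool overflowed = true;              // assume the first pass overflows
  while (overflowed) {
    // ... a marking pass runs here and may overflow the stack ...
    stack.should_expand = overflowed;  // set_should_expand()
    overflowed = false;                // stack emptied, marking restarted
  }
  if (stack.should_expand) {
    stack.expand();                    // remark-time hook
  }
  std::printf("capacity after remark: %d\n", stack.capacity);
  return 0;
}
```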
2857 3014  
2858 3015  // Aggregate the counting data that was constructed concurrently
2859 3016  // with marking.
2860 3017  class AggregateCountDataHRClosure: public HeapRegionClosure {
2861 3018    G1CollectedHeap* _g1h;
2862 3019    ConcurrentMark* _cm;
2863 3020    CardTableModRefBS* _ct_bs;
... 1583 lines elided ...