rev 3708 : 8000244: G1: Ergonomically set MarkStackSize and use virtual space for global marking stack
Summary: Ergonomically set MarkStackSize based on the number of parallel marking threads, with a reasonable minimum. If we have to restart marking due to an overflow, expand the marking stack up to a reasonable maximum. Allocate the underlying space for the marking stack from virtual memory.
Reviewed-by: jmasa

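In outline, the ergonomic sizing introduced here clamps the global mark stack capacity between the aggregate capacity of the per-worker task queues and MarkStackSizeMax. A minimal sketch of the rule, using the names from the diff below (flag defaults are platform-dependent and assumed here):

    // Hedged sketch of the new sizing rule, applied only when MarkStackSize
    // was not set on the command line:
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize,
                (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);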
          --- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp
          +++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp
[... 38 lines elided ...]
  39   39  #include "memory/genOopClosures.inline.hpp"
  40   40  #include "memory/referencePolicy.hpp"
  41   41  #include "memory/resourceArea.hpp"
  42   42  #include "oops/oop.inline.hpp"
  43   43  #include "runtime/handles.inline.hpp"
  44   44  #include "runtime/java.hpp"
  45   45  #include "services/memTracker.hpp"
  46   46  
  47   47  // Concurrent marking bit map wrapper
  48   48  
  49      -CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
  50      -  _bm((uintptr_t*)NULL,0),
       49 +CMBitMapRO::CMBitMapRO(int shifter) :
       50 +  _bm(),
  51   51    _shifter(shifter) {
  52      -  _bmStartWord = (HeapWord*)(rs.base());
  53      -  _bmWordSize  = rs.size()/HeapWordSize;    // rs.size() is in bytes
  54      -  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
  55      -                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  56      -
  57      -  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  58      -
  59      -  guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
  60      -  // For now we'll just commit all of the bit map up fromt.
  61      -  // Later on we'll try to be more parsimonious with swap.
  62      -  guarantee(_virtual_space.initialize(brs, brs.size()),
  63      -            "couldn't reseve backing store for concurrent marking bit map");
  64      -  assert(_virtual_space.committed_size() == brs.size(),
  65      -         "didn't reserve backing store for all of concurrent marking bit map?");
  66      -  _bm.set_map((uintptr_t*)_virtual_space.low());
  67      -  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
  68      -         _bmWordSize, "inconsistency in bit map sizing");
  69      -  _bm.set_size(_bmWordSize >> _shifter);
       52 +  _bmStartWord = 0;
       53 +  _bmWordSize = 0;
  70   54  }
  71   55  
  72   56  HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  73   57                                                 HeapWord* limit) const {
  74   58    // First we must round addr *up* to a possible object boundary.
  75   59    addr = (HeapWord*)align_size_up((intptr_t)addr,
  76   60                                    HeapWordSize << _shifter);
  77   61    size_t addrOffset = heapWordToOffset(addr);
  78   62    if (limit == NULL) {
  79   63      limit = _bmStartWord + _bmWordSize;
[... 21 lines elided ...]
 101   85           "get_next_one postcondition");
 102   86    return nextAddr;
 103   87  }
 104   88  
 105   89  int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 106   90    assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 107   91    return (int) (diff >> _shifter);
 108   92  }
 109   93  
 110   94  #ifndef PRODUCT
 111      -bool CMBitMapRO::covers(ReservedSpace rs) const {
       95 +bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
 112   96    // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 113   97    assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 114   98           "size inconsistency");
 115      -  return _bmStartWord == (HeapWord*)(rs.base()) &&
 116      -         _bmWordSize  == rs.size()>>LogHeapWordSize;
       99 +  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
      100 +         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
 117  101  }
 118  102  #endif
 119  103  
      104 +bool CMBitMap::allocate(ReservedSpace heap_rs) {
      105 +  _bmStartWord = (HeapWord*)(heap_rs.base());
      106 +  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
      107 +  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
      108 +                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
      109 +  if (!brs.is_reserved()) {
      110 +    warning("ConcurrentMark marking bit map allocation failure");
      111 +    return false;
      112 +  }
      113 +  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
      114 +  // For now we'll just commit all of the bit map up front.
      115 +  // Later on we'll try to be more parsimonious with swap.
      116 +  if (!_virtual_space.initialize(brs, brs.size())) {
      117 +    warning("ConcurrentMark marking bit map backing store failure");
      118 +    return false;
      119 +  }
      120 +  assert(_virtual_space.committed_size() == brs.size(),
      121 +         "didn't reserve backing store for all of concurrent marking bit map?");
      122 +  _bm.set_map((uintptr_t*)_virtual_space.low());
      123 +  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
      124 +         _bmWordSize, "inconsistency in bit map sizing");
      125 +  _bm.set_size(_bmWordSize >> _shifter);
      126 +  return true;
      127 +}
      128 +
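The backing-store arithmetic above sizes the bitmap at one bit per (HeapWordSize << _shifter) bytes of heap. A hedged worked example, assuming 8-byte heap words and a _shifter of 0 (MinObjAlignment of one word, the common 64-bit default):

    // bits needed  = _bmWordSize >> _shifter
    // bytes needed = (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1
    // For a 1 GB heap: 2^30 / 8 = 2^27 words; 2^27 / 2^3 = 2^24 bytes,
    // i.e. roughly 16 MB of virtual space per marking bitmap (G1 keeps
    // two of them: prev and next).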
 120  129  void CMBitMap::clearAll() {
 121  130    _bm.clear();
 122  131    return;
 123  132  }
 124  133  
 125  134  void CMBitMap::markRange(MemRegion mr) {
 126  135    mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 127  136    assert(!mr.is_empty(), "unexpected empty region");
 128  137    assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 129  138            ((HeapWord *) mr.end())),
[... 26 lines elided ...]
 156  165  }
 157  166  
 158  167  CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 159  168    _base(NULL), _cm(cm)
 160  169  #ifdef ASSERT
 161  170    , _drain_in_progress(false)
 162  171    , _drain_in_progress_yields(false)
 163  172  #endif
 164  173  {}
 165  174  
 166      -void CMMarkStack::allocate(size_t size) {
 167      -  _base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
 168      -  if (_base == NULL) {
 169      -    vm_exit_during_initialization("Failed to allocate CM region mark stack");
      175 +bool CMMarkStack::allocate(size_t capacity) {
      176 +  // allocate a stack of the requisite depth
      177 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
      178 +  if (!rs.is_reserved()) {
      179 +    warning("ConcurrentMark MarkStack allocation failure");
      180 +    return false;
 170  181    }
 171      -  _index = 0;
 172      -  _capacity = (jint) size;
      182 +  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
      183 +  if (!_virtual_space.initialize(rs, rs.size())) {
      184 +    warning("ConcurrentMark MarkStack backing store failure");
      185 +    // Release the virtual memory reserved for the marking stack
      186 +    rs.release();
      187 +    return false;
      188 +  }
      189 +  assert(_virtual_space.committed_size() == rs.size(),
      190 +         "Didn't reserve backing store for all of ConcurrentMark stack?");
      191 +  _base = (oop*) _virtual_space.low();
      192 +  setEmpty();
      193 +  _capacity = (jint) capacity;
 173  194    _saved_index = -1;
 174  195    NOT_PRODUCT(_max_depth = 0);
      196 +  return true;
      197 +}
      198 +
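Like the bit maps, the mark stack now draws its backing store from reserved and committed virtual memory rather than from the C heap, so a failed reservation degrades to a warning and a false return instead of aborting the VM. Hedged footprint arithmetic, assuming 8-byte oops:

    // bytes reserved = capacity * sizeof(oop)
    // e.g. a 4M-entry stack reserves (and, for now, commits) 32 MB.

Using virtual memory is also what makes the expansion below possible: the old reservation can be released and a larger one mapped in its place while the stack is known to be empty.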
      199 +void CMMarkStack::expand() {
       200 +  // Called during remark if we've overflown the marking stack during marking.
       201 +  assert(isEmpty(), "stack should have been emptied while handling overflow");
      202 +  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
      203 +  // Clear expansion flag
      204 +  _should_expand = false;
      205 +  if (_capacity == (jint) MarkStackSizeMax) {
      206 +    if (PrintGCDetails && Verbose) {
      207 +      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
      208 +    }
      209 +    return;
      210 +  }
      211 +  // Double capacity if possible
      212 +  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
       213 +  // Do not give up the existing stack until we have managed to
       214 +  // reserve the doubled capacity that we want.
      215 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
      216 +                                                           sizeof(oop)));
      217 +  if (rs.is_reserved()) {
      218 +    // Release the backing store associated with old stack
      219 +    _virtual_space.release();
      220 +    // Reinitialize virtual space for new stack
      221 +    if (!_virtual_space.initialize(rs, rs.size())) {
      222 +      fatal("Not enough swap for expanded marking stack capacity");
      223 +    }
      224 +    _base = (oop*)(_virtual_space.low());
      225 +    _index = 0;
      226 +    _capacity = new_capacity;
      227 +  } else {
      228 +    if (PrintGCDetails && Verbose) {
       229 +      // Failed to double the capacity; continue with the current one.
      230 +      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
      231 +                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
      232 +                          _capacity / K, new_capacity / K);
      233 +    }
      234 +  }
      235 +}
      236 +
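The doubling policy caps growth at MarkStackSizeMax and keeps the old stack alive until the larger reservation has succeeded. A standalone illustration of the resulting growth sequence (not HotSpot code; the initial and maximum sizes are hypothetical):

    #include <algorithm>
    #include <cstdio>

    int main() {
      size_t capacity  = 512 * 1024;        // hypothetical MarkStackSize (entries)
      const size_t max = 4 * 1024 * 1024;   // hypothetical MarkStackSizeMax
      while (capacity < max) {
        capacity = std::min(capacity * 2, max);  // mirrors MIN2(_capacity*2, max)
        std::printf("expanded to %zuK entries\n", capacity / 1024);
      }
      return 0;  // prints 1024K, 2048K, 4096K
    }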
      237 +void CMMarkStack::set_should_expand() {
       238 +  // If we're resetting the marking state because of a
      239 +  // marking stack overflow, record that we should, if
      240 +  // possible, expand the stack.
      241 +  _should_expand = _cm->has_overflown();
 175  242  }
 176  243  
 177  244  CMMarkStack::~CMMarkStack() {
 178  245    if (_base != NULL) {
 179      -    FREE_C_HEAP_ARRAY(oop, _base, mtGC);
      246 +    _base = NULL;
      247 +    _virtual_space.release();
 180  248    }
 181  249  }
 182  250  
 183  251  void CMMarkStack::par_push(oop ptr) {
 184  252    while (true) {
 185  253      if (isFull()) {
 186  254        _overflow = true;
 187  255        return;
 188  256      }
 189  257      // Otherwise...
[... 20 lines elided ...]
 210  278      // Otherwise...
 211  279      jint index = _index;
 212  280      jint next_index = index + n;
 213  281      if (next_index > _capacity) {
 214  282        _overflow = true;
 215  283        return;
 216  284      }
 217  285      jint res = Atomic::cmpxchg(next_index, &_index, index);
 218  286      if (res == index) {
 219  287        for (int i = 0; i < n; i++) {
 220      -        int ind = index + i;
      288 +        int  ind = index + i;
 221  289          assert(ind < _capacity, "By overflow test above.");
 222  290          _base[ind] = ptr_arr[i];
 223  291        }
 224  292        NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 225  293        return;
 226  294      }
 227  295      // Otherwise, we need to try again.
 228  296    }
 229  297  }
 230  298  
 231      -
 232  299  void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 233  300    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 234  301    jint start = _index;
 235  302    jint next_index = start + n;
 236  303    if (next_index > _capacity) {
 237  304      _overflow = true;
 238  305      return;
 239  306    }
 240  307    // Otherwise.
 241  308    _index = next_index;
 242  309    for (int i = 0; i < n; i++) {
 243  310      int ind = start + i;
 244  311      assert(ind < _capacity, "By overflow test above.");
 245  312      _base[ind] = ptr_arr[i];
 246  313    }
      314 +  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 247  315  }
 248  316  
 249      -
 250  317  bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 251  318    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 252  319    jint index = _index;
 253  320    if (index == 0) {
 254  321      *n = 0;
 255  322      return false;
 256  323    } else {
 257  324      int k = MIN2(max, index);
 258      -    jint new_ind = index - k;
      325 +    jint  new_ind = index - k;
 259  326      for (int j = 0; j < k; j++) {
 260  327        ptr_arr[j] = _base[new_ind + j];
 261  328      }
 262  329      _index = new_ind;
 263  330      *n = k;
 264  331      return true;
 265  332    }
 266  333  }
 267  334  
 268  335  template<class OopClosureClass>
[... 128 lines elided ...]
 397  464  }
 398  465  
 399  466  #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 400  467  #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 401  468  #endif // _MSC_VER
 402  469  
 403  470  uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 404  471    return MAX2((n_par_threads + 2) / 4, 1U);
 405  472  }
 406  473  
 407      -ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
 408      -  _markBitMap1(rs, MinObjAlignment - 1),
 409      -  _markBitMap2(rs, MinObjAlignment - 1),
      474 +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
      475 +  _g1h(g1h),
      476 +  _markBitMap1(MinObjAlignment - 1),
      477 +  _markBitMap2(MinObjAlignment - 1),
 410  478  
 411  479    _parallel_marking_threads(0),
 412  480    _max_parallel_marking_threads(0),
 413  481    _sleep_factor(0.0),
 414  482    _marking_task_overhead(1.0),
 415  483    _cleanup_sleep_factor(0.0),
 416  484    _cleanup_task_overhead(1.0),
 417  485    _cleanup_list("Cleanup List"),
 418      -  _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
 419      -  _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
 420      -           CardTableModRefBS::card_shift,
 421      -           false /* in_resource_area*/),
      486 +  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
      487 +  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
      488 +            CardTableModRefBS::card_shift,
      489 +            false /* in_resource_area*/),
 422  490  
 423  491    _prevMarkBitMap(&_markBitMap1),
 424  492    _nextMarkBitMap(&_markBitMap2),
 425  493  
 426  494    _markStack(this),
 427  495    // _finger set in set_non_marking_state
 428  496  
 429  497    _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 430  498    // _active_tasks set in set_non_marking_state
 431  499    // _tasks set inside the constructor
[... 10 lines elided ...]
 442  510  
 443  511    _init_times(),
 444  512    _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 445  513    _cleanup_times(),
 446  514    _total_counting_time(0.0),
 447  515    _total_rs_scrub_time(0.0),
 448  516  
 449  517    _parallel_workers(NULL),
 450  518  
 451  519    _count_card_bitmaps(NULL),
 452      -  _count_marked_bytes(NULL) {
      520 +  _count_marked_bytes(NULL),
      521 +  _completed_initialization(false) {
 453  522    CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 454  523    if (verbose_level < no_verbose) {
 455  524      verbose_level = no_verbose;
 456  525    }
 457  526    if (verbose_level > high_verbose) {
 458  527      verbose_level = high_verbose;
 459  528    }
 460  529    _verbose_level = verbose_level;
 461  530  
 462  531    if (verbose_low()) {
 463  532      gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 464  533                             "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 465  534    }
 466  535  
 467      -  _markStack.allocate(MarkStackSize);
      536 +  if (!_markBitMap1.allocate(heap_rs)) {
      537 +    warning("Failed to allocate first CM bit map");
      538 +    return;
      539 +  }
      540 +  if (!_markBitMap2.allocate(heap_rs)) {
      541 +    warning("Failed to allocate second CM bit map");
      542 +    return;
      543 +  }
 468  544  
 469  545    // Create & start a ConcurrentMark thread.
 470  546    _cmThread = new ConcurrentMarkThread(this);
 471  547    assert(cmThread() != NULL, "CM Thread should have been created");
 472  548    assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 473  549  
 474      -  _g1h = G1CollectedHeap::heap();
 475  550    assert(CGC_lock != NULL, "Where's the CGC_lock?");
 476      -  assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
 477      -  assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
      551 +  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
      552 +  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
 478  553  
 479  554    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 480  555    satb_qs.set_buffer_size(G1SATBBufferSize);
 481  556  
 482  557    _root_regions.init(_g1h, this);
 483  558  
 484      -  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 485      -  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 486      -
 487      -  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 488      -  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 489      -
 490      -  BitMap::idx_t card_bm_size = _card_bm.size();
 491      -
 492      -  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 493      -  _active_tasks = _max_worker_id;
 494      -  for (uint i = 0; i < _max_worker_id; ++i) {
 495      -    CMTaskQueue* task_queue = new CMTaskQueue();
 496      -    task_queue->initialize();
 497      -    _task_queues->register_queue(i, task_queue);
 498      -
 499      -    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 500      -    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
 501      -
 502      -    _tasks[i] = new CMTask(i, this,
 503      -                           _count_marked_bytes[i],
 504      -                           &_count_card_bitmaps[i],
 505      -                           task_queue, _task_queues);
 506      -
 507      -    _accum_task_vtime[i] = 0.0;
 508      -  }
 509      -
 510      -  // Calculate the card number for the bottom of the heap. Used
 511      -  // in biasing indexes into the accounting card bitmaps.
 512      -  _heap_bottom_card_num =
 513      -    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 514      -                                CardTableModRefBS::card_shift);
 515      -
 516      -  // Clear all the liveness counting data
 517      -  clear_all_count_data();
 518      -
 519  559    if (ConcGCThreads > ParallelGCThreads) {
 520      -    vm_exit_during_initialization("Can't have more ConcGCThreads "
 521      -                                  "than ParallelGCThreads.");
      560 +    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
      561 +            "than ParallelGCThreads (" UINT32_FORMAT ").",
      562 +            ConcGCThreads, ParallelGCThreads);
      563 +    return;
 522  564    }
 523  565    if (ParallelGCThreads == 0) {
 524  566      // if we are not running with any parallel GC threads we will not
 525  567      // spawn any marking threads either
 526  568      _parallel_marking_threads =       0;
 527  569      _max_parallel_marking_threads =   0;
 528  570      _sleep_factor             =     0.0;
 529  571      _marking_task_overhead    =     1.0;
 530  572    } else {
 531  573      if (ConcGCThreads > 0) {
[... 51 lines elided ...]
 583  625      guarantee(parallel_marking_threads() > 0, "peace of mind");
 584  626      _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 585  627           _max_parallel_marking_threads, false, true);
 586  628      if (_parallel_workers == NULL) {
 587  629        vm_exit_during_initialization("Failed necessary allocation.");
 588  630      } else {
 589  631        _parallel_workers->initialize_workers();
 590  632      }
 591  633    }
 592  634  
      635 +  if (FLAG_IS_DEFAULT(MarkStackSize)) {
      636 +    uintx mark_stack_size =
      637 +      MIN2(MarkStackSizeMax,
      638 +          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
      639 +    // Verify that the calculated value for MarkStackSize is in range.
      640 +    // It would be nice to use the private utility routine from Arguments.
      641 +    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      642 +      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
      643 +              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
       644 +              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      645 +      return;
      646 +    }
      647 +    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
      648 +  } else {
      649 +    // Verify MarkStackSize is in range.
      650 +    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      651 +      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
      652 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
      653 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
      654 +                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
       655 +                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
      656 +          return;
      657 +        }
      658 +      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
      659 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
      660 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
      661 +                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
      662 +                  MarkStackSize, MarkStackSizeMax);
      663 +          return;
      664 +        }
      665 +      }
      666 +    }
      667 +  }
      668 +
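A hedged worked example of the ergonomic path above (64-bit values assumed): with ParallelGCThreads=8 and ConcGCThreads unset, scale_parallel_threads() yields MAX2((8 + 2) / 4, 1U) = 2 marking threads, and taking TASKQUEUE_SIZE as 128K entries the computation becomes

    mark_stack_size = MIN2(MarkStackSizeMax,
                           MAX2(MarkStackSize, 2 * 128 * K))

so a MarkStackSize default smaller than 256K entries is raised to 256K, while a larger default is left alone.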
      669 +  if (!_markStack.allocate(MarkStackSize)) {
      670 +    warning("Failed to allocate CM marking stack");
      671 +    return;
      672 +  }
      673 +
      674 +  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
      675 +  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
      676 +
      677 +  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
      678 +  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
      679 +
      680 +  BitMap::idx_t card_bm_size = _card_bm.size();
      681 +
      682 +  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
      683 +  _active_tasks = _max_worker_id;
      684 +
      685 +  size_t max_regions = (size_t) _g1h->max_regions();
      686 +  for (uint i = 0; i < _max_worker_id; ++i) {
      687 +    CMTaskQueue* task_queue = new CMTaskQueue();
      688 +    task_queue->initialize();
      689 +    _task_queues->register_queue(i, task_queue);
      690 +
      691 +    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
      692 +    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
      693 +
      694 +    _tasks[i] = new CMTask(i, this,
      695 +                           _count_marked_bytes[i],
      696 +                           &_count_card_bitmaps[i],
      697 +                           task_queue, _task_queues);
      698 +
      699 +    _accum_task_vtime[i] = 0.0;
      700 +  }
      701 +
      702 +  // Calculate the card number for the bottom of the heap. Used
      703 +  // in biasing indexes into the accounting card bitmaps.
      704 +  _heap_bottom_card_num =
      705 +    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
      706 +                                CardTableModRefBS::card_shift);
      707 +
      708 +  // Clear all the liveness counting data
      709 +  clear_all_count_data();
      710 +
 593  711    // so that the call below can read a sensible value
 594      -  _heap_start = (HeapWord*) rs.base();
      712 +  _heap_start = (HeapWord*) heap_rs.base();
 595  713    set_non_marking_state();
      714 +  _completed_initialization = true;
 596  715  }
 597  716  
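Because the constructor can now return early after a warning, callers are expected to consult the new _completed_initialization flag rather than assume success. A hypothetical sketch of the call-site check (the g1CollectedHeap.cpp side of this change is not shown in this frame; completed_initialization() is the accessor implied by the new field):

    _cm = new ConcurrentMark(this, heap_rs);
    if (_cm == NULL || !_cm->completed_initialization()) {
      vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
      return JNI_ENOMEM;
    }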
 598  717  void ConcurrentMark::update_g1_committed(bool force) {
 599  718    // If concurrent marking is not in progress, then we do not need to
 600  719    // update _heap_end.
 601  720    if (!concurrent_marking_in_progress() && !force) return;
 602  721  
 603  722    MemRegion committed = _g1h->g1_committed();
 604  723    assert(committed.start() == _heap_start, "start shouldn't change");
 605  724    HeapWord* new_end = committed.end();
[... 552 lines elided ...]
1158 1277      if (VerifyDuringGC) {
1159 1278        HandleMark hm;  // handle scope
1160 1279        gclog_or_tty->print(" VerifyDuringGC:(after)");
1161 1280        Universe::heap()->prepare_for_verify();
1162 1281        Universe::verify(/* silent      */ false,
1163 1282                         /* option      */ VerifyOption_G1UseNextMarking);
1164 1283      }
1165 1284      assert(!restart_for_overflow(), "sanity");
1166 1285    }
1167 1286  
     1287 +  // Expand the marking stack, if we have to and if we can.
     1288 +  if (_markStack.should_expand()) {
     1289 +    _markStack.expand();
     1290 +  }
     1291 +
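Tying the pieces together, expansion is deliberately deferred to this point because the stack is known to be empty at remark. A hedged outline of the overflow-to-expansion sequence implied by the changes in this file:

    // 1. par_push()/par_push_arr() runs out of _capacity: _overflow is set
    //    and ConcurrentMark records has_overflown().
    // 2. clear_marking_state() calls set_should_expand() and then empties
    //    the stack; marking restarts to handle the overflow.
    // 3. Once remark completes, should_expand() is true and the stack is
    //    empty, so expand() can double _capacity before the next cycle.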
1168 1292    // Reset the marking state if marking completed
1169 1293    if (!restart_for_overflow()) {
1170 1294      set_non_marking_state();
1171 1295    }
1172 1296  
1173 1297  #if VERIFY_OBJS_PROCESSED
1174 1298    _scan_obj_cl.objs_processed = 0;
1175 1299    ThreadLocalObjQueue::objs_enqueued = 0;
1176 1300  #endif
1177 1301  
[... 1600 lines elided ...]
2778 2902    VerifyNoCSetOopsClosure cl;
2779 2903  
2780 2904    if (verify_stacks) {
2781 2905      // Verify entries on the global mark stack
2782 2906      cl.set_phase(VerifyNoCSetOopsStack);
2783 2907      _markStack.oops_do(&cl);
2784 2908  
2785 2909      // Verify entries on the task queues
2786 2910      for (uint i = 0; i < _max_worker_id; i += 1) {
2787 2911        cl.set_phase(VerifyNoCSetOopsQueues, i);
2788      -      OopTaskQueue* queue = _task_queues->queue(i);
     2912 +      CMTaskQueue* queue = _task_queues->queue(i);
2789 2913        queue->oops_do(&cl);
2790 2914      }
2791 2915    }
2792 2916  
2793 2917    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2794 2918  
2795 2919    // Verify entries on the enqueued SATB buffers
2796 2920    if (verify_enqueued_buffers) {
2797 2921      cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2798 2922      satb_qs.iterate_completed_buffers_read_only(&cl);
[... 34 lines elided ...]
2833 2957                    !task_hr->in_collection_set(),
2834 2958                    err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2835 2959                            task_finger, HR_FORMAT_PARAMS(task_hr)));
2836 2960        }
2837 2961      }
2838 2962    }
2839 2963  }
2840 2964  #endif // PRODUCT
2841 2965  
2842 2966  void ConcurrentMark::clear_marking_state(bool clear_overflow) {
2843      -  _markStack.setEmpty();
2844      -  _markStack.clear_overflow();
     2967 +  _markStack.set_should_expand();
     2968 +  _markStack.setEmpty();        // Also clears the _markStack overflow flag
2845 2969    if (clear_overflow) {
2846 2970      clear_has_overflown();
2847 2971    } else {
2848 2972      assert(has_overflown(), "pre-condition");
2849 2973    }
2850 2974    _finger = _heap_start;
2851 2975  
2852 2976    for (uint i = 0; i < _max_worker_id; ++i) {
2853      -    OopTaskQueue* queue = _task_queues->queue(i);
     2977 +    CMTaskQueue* queue = _task_queues->queue(i);
2854 2978      queue->set_empty();
2855 2979    }
2856 2980  }
2857 2981  
2858 2982  // Aggregate the counting data that was constructed concurrently
2859 2983  // with marking.
2860 2984  class AggregateCountDataHRClosure: public HeapRegionClosure {
2861 2985    G1CollectedHeap* _g1h;
2862 2986    ConcurrentMark* _cm;
2863 2987    CardTableModRefBS* _ct_bs;
[... 1583 lines elided ...]