
src/share/vm/gc/g1/concurrentMark.cpp

rev 8978 : imported patch remove_err_msg

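The first listing below shows the affected hunks before the remove_err_msg
patch; the second listing shows them after it. The change is mechanical:
the err_msg() wrapper is dropped from assert() and guarantee() calls,
which now take a printf-style format string and its arguments directly.
A minimal before/after sketch of the pattern, using the guarantee from
note_end_of_gc() below:

    // Before: the message is pre-formatted through err_msg().
    guarantee(_saved_index == _index,
              err_msg("saved index: %d index: %d", _saved_index, _index));

    // After: the format string and arguments go straight to the macro.
    guarantee(_saved_index == _index,
              "saved index: %d index: %d", _saved_index, _index);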

 382       break;
 383     }
 384   }
 385   debug_only(_drain_in_progress = false);
 386   return res;
 387 }
 388 
 389 void CMMarkStack::note_start_of_gc() {
 390   assert(_saved_index == -1,
 391          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 392   _saved_index = _index;
 393 }
 394 
 395 void CMMarkStack::note_end_of_gc() {
 396   // This is intentionally a guarantee, instead of an assert. If we
 397   // accidentally add something to the mark stack during GC, it
 398   // will be a correctness issue, so it's better if we crash. We'll
 399   // only check this once per GC anyway, so it won't be a performance
 400   // issue in any way.
 401   guarantee(_saved_index == _index,
 402             err_msg("saved index: %d index: %d", _saved_index, _index));
 403   _saved_index = -1;
 404 }
 405 
 406 CMRootRegions::CMRootRegions() :
 407   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 408   _should_abort(false),  _next_survivor(NULL) { }
 409 
 410 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 411   _young_list = g1h->young_list();
 412   _cm = cm;
 413 }
 414 
 415 void CMRootRegions::prepare_for_scan() {
 416   assert(!scan_in_progress(), "pre-condition");
 417 
 418   // Currently, only survivors can be root regions.
 419   assert(_next_survivor == NULL, "pre-condition");
 420   _next_survivor = _young_list->first_survivor_region();
 421   _scan_in_progress = (_next_survivor != NULL);
 422   _should_abort = false;

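The note_start_of_gc()/note_end_of_gc() pair above brackets a GC pause so
that an accidental push onto the mark stack during the pause trips the
guarantee. A standalone model of the bracketing (plain C++, not HotSpot
code; the class and field names mirror the hunk above):

    #include <cassert>

    struct MarkStackModel {
      int _index = 0;        // current stack depth
      int _saved_index = -1; // depth recorded at the start of the pause

      void note_start_of_gc() {
        assert(_saved_index == -1 && "bracketed incorrectly");
        _saved_index = _index;
      }
      void note_end_of_gc() {
        // Any push between the two calls changes _index and fails here.
        assert(_saved_index == _index && "stack grew during GC");
        _saved_index = -1;
      }
    };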

 777   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 778   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 779 }
 780 
 781 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 782   set_concurrency(active_tasks);
 783 
 784   _concurrent = concurrent;
 785   // We propagate this to all tasks, not just the active ones.
 786   for (uint i = 0; i < _max_worker_id; ++i)
 787     _tasks[i]->set_concurrent(concurrent);
 788 
 789   if (concurrent) {
 790     set_concurrent_marking_in_progress();
 791   } else {
 792     // We currently assume that the concurrent flag has been set to
 793     // false before we start remark. At this point we should also be
 794     // in a STW phase.
 795     assert(!concurrent_marking_in_progress(), "invariant");
 796     assert(out_of_regions(),
 797            err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 798                    p2i(_finger), p2i(_heap_end)));
 799   }
 800 }
 801 
 802 void ConcurrentMark::set_non_marking_state() {
 803   // We set the global marking state to some default values when we're
 804   // not doing marking.
 805   reset_marking_state();
 806   _active_tasks = 0;
 807   clear_concurrent_marking_in_progress();
 808 }
 809 
 810 ConcurrentMark::~ConcurrentMark() {
 811   // The ConcurrentMark instance is never freed.
 812   ShouldNotReachHere();
 813 }
 814 
 815 void ConcurrentMark::clearNextBitmap() {
 816   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 817 
 818   // Make sure that the concurrent mark thread looks to still be in

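set_concurrency_and_phase() above is called in two modes: with
concurrent == true when concurrent marking starts (or restarts after an
overflow), and with concurrent == false just before the STW remark pause,
by which point every task must be out of regions. The remark-side call
appears verbatim at line 2572 below; the concurrent-side call sites are
outside these hunks, so the pattern sketched here is an assumption:

    // Assumed calling pattern, reconstructed from the asserts above:
    set_concurrency_and_phase(active_workers, true /* concurrent */);
    // ... concurrent marking runs ...
    set_concurrency_and_phase(active_workers, false /* concurrent */);
    // ... STW remark runs ...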

1398     CMCountDataClosureBase(g1h, region_bm, card_bm),
1399     _bm(bm), _region_marked_bytes(0) { }
1400 
1401   bool doHeapRegion(HeapRegion* hr) {
1402 
1403     if (hr->is_continues_humongous()) {
1404       // We will ignore these here and process them when their
1405       // associated "starts humongous" region is processed (see
1406       // set_bit_for_heap_region()). Note that we cannot rely on their
1407       // associated "starts humongous" region to have its bit set to
1408       // 1 since, due to the region chunking in the parallel region
1409       // iteration, a "continues humongous" region might be visited
1410       // before its associated "starts humongous".
1411       return false;
1412     }
1413 
1414     HeapWord* ntams = hr->next_top_at_mark_start();
1415     HeapWord* start = hr->bottom();
1416 
1417     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1418            err_msg("Preconditions not met - "
1419                    "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1420                    p2i(start), p2i(ntams), p2i(hr->end())));
1421 
1422     // Find the first marked object at or after "start".
1423     start = _bm->getNextMarkedWordAddress(start, ntams);
1424 
1425     size_t marked_bytes = 0;
1426 
1427     while (start < ntams) {
1428       oop obj = oop(start);
1429       int obj_sz = obj->size();
1430       HeapWord* obj_end = start + obj_sz;
1431 
1432       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1433       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1434 
1435       // Note: if we're looking at the last region in the heap - obj_end
1436       // could actually be just beyond the end of the heap; end_idx
1437       // will then correspond to a (non-existent) card that is also
1438       // just beyond the heap.
1439       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1440         // end of object is not card aligned - increment to cover

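The card bitmap indices above are heap addresses mapped to card numbers,
and end_idx is bumped when the object's end is not card aligned so the
partial last card is still covered. A standalone model of that
arithmetic, assuming the usual 512-byte cards of CardTableModRefBS
(kCardShift and the function names are illustrative):

    #include <cstddef>
    #include <cstdint>

    const size_t kCardShift = 9; // 512-byte cards

    // Card index of 'addr' relative to the start of the reserved heap.
    size_t card_index_for(uintptr_t heap_start, uintptr_t addr) {
      return (addr - heap_start) >> kCardShift;
    }

    // Exclusive end index covering [start, end): round up when 'end'
    // is not card aligned, mirroring the end_idx += 1 in the code.
    size_t card_end_index_for(uintptr_t heap_start, uintptr_t end) {
      size_t idx = card_index_for(heap_start, end);
      if ((end - heap_start) & ((uintptr_t(1) << kCardShift) - 1)) {
        idx += 1; // partial last card
      }
      return idx;
    }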

1700     // Mark the allocated-since-marking portion...
1701     if (ntams < top) {
1702       // This definitely means the region has live objects.
1703       set_bit_for_region(hr);
1704 
1705       // Now set the bits in the card bitmap for [ntams, top)
1706       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1707       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1708 
1709       // Note: if we're looking at the last region in the heap - top
1710       // could actually be just beyond the end of the heap; end_idx
1711       // will then correspond to a (non-existent) card that is also
1712       // just beyond the heap.
1713       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1714         // end of object is not card aligned - increment to cover
1715         // all the cards spanned by the object
1716         end_idx += 1;
1717       }
1718 
1719       assert(end_idx <= _card_bm->size(),
1720              err_msg("oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1721                      end_idx, _card_bm->size()));
1722       assert(start_idx < _card_bm->size(),
1723              err_msg("oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1724                      start_idx, _card_bm->size()));
1725 
1726       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1727     }
1728 
1729     // Set the bit for the region if it contains live data
1730     if (hr->next_marked_bytes() > 0) {
1731       set_bit_for_region(hr);
1732     }
1733 
1734     return false;
1735   }
1736 };
1737 
1738 class G1ParFinalCountTask: public AbstractGangTask {
1739 protected:
1740   G1CollectedHeap* _g1h;
1741   ConcurrentMark* _cm;
1742   BitMap* _actual_region_bm;
1743   BitMap* _actual_card_bm;
1744 

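set_card_bitmap_range() is called with is_par == true because, although
each region is processed by one worker, card bits from adjacent regions
can share a word in the card bitmap, so the bits must be set atomically.
A standalone model of a parallel-safe range set (plain C++ atomics, not
the HotSpot BitMap API):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    const size_t kBitsPerWord = 8 * sizeof(uintptr_t);

    // Atomically set bits [beg, end) so that concurrent workers writing
    // to the same underlying word cannot lose each other's updates.
    void par_set_range(std::vector<std::atomic<uintptr_t> >& bm,
                       size_t beg, size_t end) {
      for (size_t i = beg; i < end; i++) {
        uintptr_t mask = uintptr_t(1) << (i % kBitsPerWord);
        bm[i / kBitsPerWord].fetch_or(mask, std::memory_order_relaxed);
      }
    }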

2454   _nextMarkBitMap  = (CMBitMap*)  temp;
2455 }
2456 
2457 // Closure for marking entries in SATB buffers.
2458 class CMSATBBufferClosure : public SATBBufferClosure {
2459 private:
2460   CMTask* _task;
2461   G1CollectedHeap* _g1h;
2462 
2463   // This is very similar to CMTask::deal_with_reference, but with
2464   // more relaxed requirements for the argument, so this must be more
2465   // circumspect about treating the argument as an object.
2466   void do_entry(void* entry) const {
2467     _task->increment_refs_reached();
2468     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2469     if (entry < hr->next_top_at_mark_start()) {
2470       // Until we get here, we don't know whether entry refers to a valid
2471       // object; it could instead have been a stale reference.
2472       oop obj = static_cast<oop>(entry);
2473       assert(obj->is_oop(true /* ignore mark word */),
2474              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2475       _task->make_reference_grey(obj, hr);
2476     }
2477   }
2478 
2479 public:
2480   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2481     : _task(task), _g1h(g1h) { }
2482 
2483   virtual void do_buffer(void** buffer, size_t size) {
2484     for (size_t i = 0; i < size; ++i) {
2485       do_entry(buffer[i]);
2486     }
2487   }
2488 };
2489 
2490 class G1RemarkThreadsClosure : public ThreadClosure {
2491   CMSATBBufferClosure _cm_satb_cl;
2492   G1CMOopClosure _cm_cl;
2493   MarkingCodeBlobClosure _code_cl;
2494   int _thread_parity;

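do_entry() above may only treat an entry as an object once it is known to
lie below the region's NTAMS: anything at or above NTAMS was allocated
since marking started and is implicitly live, and a stale SATB entry can
point there without referring to a valid object. A one-line standalone
model of the filter (simplified types; the real code uses HeapRegion and
oop):

    // True only when the entry is safe to treat as an object reference.
    bool should_mark(const void* entry, const void* region_ntams) {
      return entry < region_ntams;
    }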

2571   uint active_workers = g1h->workers()->active_workers();
2572   set_concurrency_and_phase(active_workers, false /* concurrent */);
2573   // Leave _parallel_marking_threads at its
2574   // value originally calculated in the ConcurrentMark
2575   // constructor and pass the number of active workers
2576   // through the gang in the task.
2577 
2578   {
2579     StrongRootsScope srs(active_workers);
2580 
2581     CMRemarkTask remarkTask(this, active_workers);
2582     // We will start all available threads, even if we decide that the
2583     // active_workers will be fewer. The extra ones will just bail out
2584     // immediately.
2585     g1h->workers()->run_task(&remarkTask);
2586   }
2587 
2588   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2589   guarantee(has_overflown() ||
2590             satb_mq_set.completed_buffers_num() == 0,
2591             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2592                     BOOL_TO_STR(has_overflown()),
2593                     satb_mq_set.completed_buffers_num()));
2594 
2595   print_stats();
2596 }
2597 
2598 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2599   // Note we are overriding the read-only view of the prev map here, via
2600   // the cast.
2601   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2602 }
2603 
2604 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2605   _nextMarkBitMap->clearRange(mr);
2606 }
2607 
2608 HeapRegion*
2609 ConcurrentMark::claim_region(uint worker_id) {
2610   // "checkpoint" the finger
2611   HeapWord* finger = _finger;
2612 
2613   // _heap_end will not change underneath our feet; it only changes at

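claim_region() above checkpoints the global finger; in the body elided
after line 2613 the finger is advanced with a compare-and-swap so that
each worker claims a distinct region. A standalone sketch of that idiom
(plain C++ atomics; the real code operates on HeapWord* and HeapRegion):

    #include <atomic>

    struct FingerModel {
      std::atomic<char*> finger; // next unclaimed region boundary

      // Workers race to bump the finger from a region's start to its
      // end; the CAS winner owns the region, losers look elsewhere.
      char* claim(char* region_start, char* region_end) {
        char* expected = region_start;
        if (finger.compare_exchange_strong(expected, region_end)) {
          return region_start;
        }
        return nullptr;
      }
    };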

2707 
2708   return NULL;
2709 }
2710 
2711 #ifndef PRODUCT
2712 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
2713 private:
2714   G1CollectedHeap* _g1h;
2715   const char* _phase;
2716   int _info;
2717 
2718 public:
2719   VerifyNoCSetOops(const char* phase, int info = -1) :
2720     _g1h(G1CollectedHeap::heap()),
2721     _phase(phase),
2722     _info(info)
2723   { }
2724 
2725   void operator()(oop obj) const {
2726     guarantee(obj->is_oop(),
2727               err_msg("Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2728                       p2i(obj), _phase, _info));
2729     guarantee(!_g1h->obj_in_cs(obj),
2730               err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2731                       p2i(obj), _phase, _info));
2732   }
2733 };
2734 
2735 void ConcurrentMark::verify_no_cset_oops() {
2736   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2737   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2738     return;
2739   }
2740 
2741   // Verify entries on the global mark stack
2742   _markStack.iterate(VerifyNoCSetOops("Stack"));
2743 
2744   // Verify entries on the task queues
2745   for (uint i = 0; i < _max_worker_id; ++i) {
2746     CMTaskQueue* queue = _task_queues->queue(i);
2747     queue->iterate(VerifyNoCSetOops("Queue", i));
2748   }
2749 
2750   // Verify the global finger
2751   HeapWord* global_finger = finger();
2752   if (global_finger != NULL && global_finger < _heap_end) {
2753     // The global finger always points to a heap region boundary. We
2754     // use heap_region_containing_raw() to get the containing region
2755     // given that the global finger could be pointing to a free region
2756     // which subsequently becomes continues humongous. If that
2757     // happens, heap_region_containing() will return the bottom of the
2758     // corresponding starts humongous region and the check below will
2759     // not hold any more.
2760     // Since we always iterate over all regions, we might get a NULL HeapRegion
2761     // here.
2762     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2763     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2764               err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
2765                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2766   }
2767 
2768   // Verify the task fingers
2769   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2770   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2771     CMTask* task = _tasks[i];
2772     HeapWord* task_finger = task->finger();
2773     if (task_finger != NULL && task_finger < _heap_end) {
2774       // See above note on the global finger verification.
2775       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2776       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2777                 !task_hr->in_collection_set(),
2778                 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
2779                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2780     }
2781   }
2782 }
2783 #endif // PRODUCT
2784 
2785 // Aggregate the counting data that was constructed concurrently
2786 // with marking.
2787 class AggregateCountDataHRClosure: public HeapRegionClosure {
2788   G1CollectedHeap* _g1h;
2789   ConcurrentMark* _cm;
2790   CardTableModRefBS* _ct_bs;
2791   BitMap* _cm_card_bm;
2792   uint _max_worker_id;
2793 
2794  public:
2795   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2796                               BitMap* cm_card_bm,
2797                               uint max_worker_id) :
2798     _g1h(g1h), _cm(g1h->concurrent_mark()),
2799     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2800     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2801 
2802   bool doHeapRegion(HeapRegion* hr) {
2803     if (hr->is_continues_humongous()) {
2804       // We will ignore these here and process them when their
2805       // associated "starts humongous" region is processed.
2806       // Note that we cannot rely on their associated
2807       // "starts humongous" region to have its bit set to 1
2808       // since, due to the region chunking in the parallel region
2809       // iteration, a "continues humongous" region might be visited
2810       // before its associated "starts humongous".
2811       return false;
2812     }
2813 
2814     HeapWord* start = hr->bottom();
2815     HeapWord* limit = hr->next_top_at_mark_start();
2816     HeapWord* end = hr->end();
2817 
2818     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2819            err_msg("Preconditions not met - "
2820                    "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2821                    "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2822                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2823 
2824     assert(hr->next_marked_bytes() == 0, "Precondition");
2825 
2826     if (start == limit) {
2827       // NTAMS of this region has not been set so nothing to do.
2828       return false;
2829     }
2830 
2831     // 'start' should be in the heap.
2832     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2833     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2834     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2835 
2836     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2837     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2838     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2839 
2840     // If ntams is not card aligned then we bump the card bitmap index
2841     // for limit so that we get all the cards spanned by
2842     // the object ending at ntams.

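AggregateCountDataHRClosure above folds the counting data gathered
concurrently by each worker into the global structures: per region, the
workers' card bitmap slices are OR-ed into the global card bitmap and
their live-byte counts summed (its body is truncated at line 2842). A
standalone model of the accumulation, assuming one bitmap slice and one
per-region byte count per worker:

    #include <cstddef>
    #include <vector>

    size_t aggregate_counts(const std::vector<std::vector<bool> >& worker_bm,
                            const std::vector<size_t>& worker_live_bytes,
                            std::vector<bool>& global_bm,
                            size_t start_idx, size_t limit_idx) {
      size_t live = 0;
      for (size_t w = 0; w < worker_bm.size(); ++w) {
        // OR this worker's card bits for [start_idx, limit_idx) into
        // the global card bitmap.
        for (size_t i = start_idx; i < limit_idx; ++i) {
          if (worker_bm[w][i]) global_bm[i] = true;
        }
        live += worker_live_bytes[w]; // this worker's live bytes here
      }
      return live;
    }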

The same hunks follow as they appear after the patch, with the err_msg()
wrappers removed:

 382       break;
 383     }
 384   }
 385   debug_only(_drain_in_progress = false);
 386   return res;
 387 }
 388 
 389 void CMMarkStack::note_start_of_gc() {
 390   assert(_saved_index == -1,
 391          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 392   _saved_index = _index;
 393 }
 394 
 395 void CMMarkStack::note_end_of_gc() {
 396   // This is intentionally a guarantee, instead of an assert. If we
 397   // accidentally add something to the mark stack during GC, it
 398   // will be a correctness issue, so it's better if we crash. We'll
 399   // only check this once per GC anyway, so it won't be a performance
 400   // issue in any way.
 401   guarantee(_saved_index == _index,
 402             "saved index: %d index: %d", _saved_index, _index);
 403   _saved_index = -1;
 404 }
 405 
 406 CMRootRegions::CMRootRegions() :
 407   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 408   _should_abort(false),  _next_survivor(NULL) { }
 409 
 410 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 411   _young_list = g1h->young_list();
 412   _cm = cm;
 413 }
 414 
 415 void CMRootRegions::prepare_for_scan() {
 416   assert(!scan_in_progress(), "pre-condition");
 417 
 418   // Currently, only survivors can be root regions.
 419   assert(_next_survivor == NULL, "pre-condition");
 420   _next_survivor = _young_list->first_survivor_region();
 421   _scan_in_progress = (_next_survivor != NULL);
 422   _should_abort = false;


 777   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 778   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 779 }
 780 
 781 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 782   set_concurrency(active_tasks);
 783 
 784   _concurrent = concurrent;
 785   // We propagate this to all tasks, not just the active ones.
 786   for (uint i = 0; i < _max_worker_id; ++i)
 787     _tasks[i]->set_concurrent(concurrent);
 788 
 789   if (concurrent) {
 790     set_concurrent_marking_in_progress();
 791   } else {
 792     // We currently assume that the concurrent flag has been set to
 793     // false before we start remark. At this point we should also be
 794     // in a STW phase.
 795     assert(!concurrent_marking_in_progress(), "invariant");
 796     assert(out_of_regions(),
 797            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 798            p2i(_finger), p2i(_heap_end));
 799   }
 800 }
 801 
 802 void ConcurrentMark::set_non_marking_state() {
 803   // We set the global marking state to some default values when we're
 804   // not doing marking.
 805   reset_marking_state();
 806   _active_tasks = 0;
 807   clear_concurrent_marking_in_progress();
 808 }
 809 
 810 ConcurrentMark::~ConcurrentMark() {
 811   // The ConcurrentMark instance is never freed.
 812   ShouldNotReachHere();
 813 }
 814 
 815 void ConcurrentMark::clearNextBitmap() {
 816   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 817 
 818   // Make sure that the concurrent mark thread looks to still be in


1398     CMCountDataClosureBase(g1h, region_bm, card_bm),
1399     _bm(bm), _region_marked_bytes(0) { }
1400 
1401   bool doHeapRegion(HeapRegion* hr) {
1402 
1403     if (hr->is_continues_humongous()) {
1404       // We will ignore these here and process them when their
1405       // associated "starts humongous" region is processed (see
1406       // set_bit_for_heap_region()). Note that we cannot rely on their
1407       // associated "starts humongous" region to have its bit set to
1408       // 1 since, due to the region chunking in the parallel region
1409       // iteration, a "continues humongous" region might be visited
1410       // before its associated "starts humongous".
1411       return false;
1412     }
1413 
1414     HeapWord* ntams = hr->next_top_at_mark_start();
1415     HeapWord* start = hr->bottom();
1416 
1417     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1418            "Preconditions not met - "
1419            "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1420            p2i(start), p2i(ntams), p2i(hr->end()));
1421 
1422     // Find the first marked object at or after "start".
1423     start = _bm->getNextMarkedWordAddress(start, ntams);
1424 
1425     size_t marked_bytes = 0;
1426 
1427     while (start < ntams) {
1428       oop obj = oop(start);
1429       int obj_sz = obj->size();
1430       HeapWord* obj_end = start + obj_sz;
1431 
1432       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1433       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1434 
1435       // Note: if we're looking at the last region in the heap - obj_end
1436       // could actually be just beyond the end of the heap; end_idx
1437       // will then correspond to a (non-existent) card that is also
1438       // just beyond the heap.
1439       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1440         // end of object is not card aligned - increment to cover


1700     // Mark the allocated-since-marking portion...
1701     if (ntams < top) {
1702       // This definitely means the region has live objects.
1703       set_bit_for_region(hr);
1704 
1705       // Now set the bits in the card bitmap for [ntams, top)
1706       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1707       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1708 
1709       // Note: if we're looking at the last region in the heap - top
1710       // could actually be just beyond the end of the heap; end_idx
1711       // will then correspond to a (non-existent) card that is also
1712       // just beyond the heap.
1713       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1714         // end of object is not card aligned - increment to cover
1715         // all the cards spanned by the object
1716         end_idx += 1;
1717       }
1718 
1719       assert(end_idx <= _card_bm->size(),
1720              "oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1721              end_idx, _card_bm->size());
1722       assert(start_idx < _card_bm->size(),
1723              "oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1724              start_idx, _card_bm->size());
1725 
1726       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1727     }
1728 
1729     // Set the bit for the region if it contains live data
1730     if (hr->next_marked_bytes() > 0) {
1731       set_bit_for_region(hr);
1732     }
1733 
1734     return false;
1735   }
1736 };
1737 
1738 class G1ParFinalCountTask: public AbstractGangTask {
1739 protected:
1740   G1CollectedHeap* _g1h;
1741   ConcurrentMark* _cm;
1742   BitMap* _actual_region_bm;
1743   BitMap* _actual_card_bm;
1744 


2454   _nextMarkBitMap  = (CMBitMap*)  temp;
2455 }
2456 
2457 // Closure for marking entries in SATB buffers.
2458 class CMSATBBufferClosure : public SATBBufferClosure {
2459 private:
2460   CMTask* _task;
2461   G1CollectedHeap* _g1h;
2462 
2463   // This is very similar to CMTask::deal_with_reference, but with
2464   // more relaxed requirements for the argument, so this must be more
2465   // circumspect about treating the argument as an object.
2466   void do_entry(void* entry) const {
2467     _task->increment_refs_reached();
2468     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2469     if (entry < hr->next_top_at_mark_start()) {
2470       // Until we get here, we don't know whether entry refers to a valid
2471       // object; it could instead have been a stale reference.
2472       oop obj = static_cast<oop>(entry);
2473       assert(obj->is_oop(true /* ignore mark word */),
2474              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
2475       _task->make_reference_grey(obj, hr);
2476     }
2477   }
2478 
2479 public:
2480   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2481     : _task(task), _g1h(g1h) { }
2482 
2483   virtual void do_buffer(void** buffer, size_t size) {
2484     for (size_t i = 0; i < size; ++i) {
2485       do_entry(buffer[i]);
2486     }
2487   }
2488 };
2489 
2490 class G1RemarkThreadsClosure : public ThreadClosure {
2491   CMSATBBufferClosure _cm_satb_cl;
2492   G1CMOopClosure _cm_cl;
2493   MarkingCodeBlobClosure _code_cl;
2494   int _thread_parity;

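G1RemarkThreadsClosure above carries a _thread_parity so that, when
several remark workers walk the thread list, each thread is claimed by
exactly one worker. A standalone model of parity-based claiming,
assuming an atomic per-thread claim value compared against the current
iteration's parity (the real mechanism is the claim in
Thread::claim_oops_do):

    #include <atomic>

    struct ThreadModel {
      std::atomic<int> claim_parity{0};

      // Returns true for exactly one caller per iteration: the first
      // worker to swing the claim value to the global parity.
      bool claim(int global_parity) {
        int prev = claim_parity.load(std::memory_order_relaxed);
        return prev != global_parity &&
               claim_parity.compare_exchange_strong(prev, global_parity);
      }
    };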

2571   uint active_workers = g1h->workers()->active_workers();
2572   set_concurrency_and_phase(active_workers, false /* concurrent */);
2573   // Leave _parallel_marking_threads at its
2574   // value originally calculated in the ConcurrentMark
2575   // constructor and pass the number of active workers
2576   // through the gang in the task.
2577 
2578   {
2579     StrongRootsScope srs(active_workers);
2580 
2581     CMRemarkTask remarkTask(this, active_workers);
2582     // We will start all available threads, even if we decide that the
2583     // active_workers will be fewer. The extra ones will just bail out
2584     // immediately.
2585     g1h->workers()->run_task(&remarkTask);
2586   }
2587 
2588   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2589   guarantee(has_overflown() ||
2590             satb_mq_set.completed_buffers_num() == 0,
2591             "Invariant: has_overflown = %s, num buffers = %d",
2592             BOOL_TO_STR(has_overflown()),
2593             satb_mq_set.completed_buffers_num());
2594 
2595   print_stats();
2596 }
2597 
2598 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2599   // Note we are overriding the read-only view of the prev map here, via
2600   // the cast.
2601   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2602 }
2603 
2604 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2605   _nextMarkBitMap->clearRange(mr);
2606 }
2607 
2608 HeapRegion*
2609 ConcurrentMark::claim_region(uint worker_id) {
2610   // "checkpoint" the finger
2611   HeapWord* finger = _finger;
2612 
2613   // _heap_end will not change underneath our feet; it only changes at


2707 
2708   return NULL;
2709 }
2710 
2711 #ifndef PRODUCT
2712 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
2713 private:
2714   G1CollectedHeap* _g1h;
2715   const char* _phase;
2716   int _info;
2717 
2718 public:
2719   VerifyNoCSetOops(const char* phase, int info = -1) :
2720     _g1h(G1CollectedHeap::heap()),
2721     _phase(phase),
2722     _info(info)
2723   { }
2724 
2725   void operator()(oop obj) const {
2726     guarantee(obj->is_oop(),
2727               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2728               p2i(obj), _phase, _info);
2729     guarantee(!_g1h->obj_in_cs(obj),
2730               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2731               p2i(obj), _phase, _info);
2732   }
2733 };
2734 
2735 void ConcurrentMark::verify_no_cset_oops() {
2736   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2737   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2738     return;
2739   }
2740 
2741   // Verify entries on the global mark stack
2742   _markStack.iterate(VerifyNoCSetOops("Stack"));
2743 
2744   // Verify entries on the task queues
2745   for (uint i = 0; i < _max_worker_id; ++i) {
2746     CMTaskQueue* queue = _task_queues->queue(i);
2747     queue->iterate(VerifyNoCSetOops("Queue", i));
2748   }
2749 
2750   // Verify the global finger
2751   HeapWord* global_finger = finger();
2752   if (global_finger != NULL && global_finger < _heap_end) {
2753     // The global finger always points to a heap region boundary. We
2754     // use heap_region_containing_raw() to get the containing region
2755     // given that the global finger could be pointing to a free region
2756     // which subsequently becomes continues humongous. If that
2757     // happens, heap_region_containing() will return the bottom of the
2758     // corresponding starts humongous region and the check below will
2759     // not hold any more.
2760     // Since we always iterate over all regions, we might get a NULL HeapRegion
2761     // here.
2762     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2763     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2764               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2765               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2766   }
2767 
2768   // Verify the task fingers
2769   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2770   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2771     CMTask* task = _tasks[i];
2772     HeapWord* task_finger = task->finger();
2773     if (task_finger != NULL && task_finger < _heap_end) {
2774       // See above note on the global finger verification.
2775       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2776       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2777                 !task_hr->in_collection_set(),
2778                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2779                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2780     }
2781   }
2782 }
2783 #endif // PRODUCT
2784 
2785 // Aggregate the counting data that was constructed concurrently
2786 // with marking.
2787 class AggregateCountDataHRClosure: public HeapRegionClosure {
2788   G1CollectedHeap* _g1h;
2789   ConcurrentMark* _cm;
2790   CardTableModRefBS* _ct_bs;
2791   BitMap* _cm_card_bm;
2792   uint _max_worker_id;
2793 
2794  public:
2795   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2796                               BitMap* cm_card_bm,
2797                               uint max_worker_id) :
2798     _g1h(g1h), _cm(g1h->concurrent_mark()),
2799     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2800     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2801 
2802   bool doHeapRegion(HeapRegion* hr) {
2803     if (hr->is_continues_humongous()) {
2804       // We will ignore these here and process them when their
2805       // associated "starts humongous" region is processed.
2806       // Note that we cannot rely on their associated
2807       // "starts humongous" region to have its bit set to 1
2808       // since, due to the region chunking in the parallel region
2809       // iteration, a "continues humongous" region might be visited
2810       // before its associated "starts humongous".
2811       return false;
2812     }
2813 
2814     HeapWord* start = hr->bottom();
2815     HeapWord* limit = hr->next_top_at_mark_start();
2816     HeapWord* end = hr->end();
2817 
2818     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2819            "Preconditions not met - "
2820            "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2821            "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2822            p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2823 
2824     assert(hr->next_marked_bytes() == 0, "Precondition");
2825 
2826     if (start == limit) {
2827       // NTAMS of this region has not been set so nothing to do.
2828       return false;
2829     }
2830 
2831     // 'start' should be in the heap.
2832     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2833     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2834     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2835 
2836     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2837     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2838     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2839 
2840     // If ntams is not card aligned then we bump the card bitmap index
2841     // for limit so that we get all the cards spanned by
2842     // the object ending at ntams.

