289 // Double capacity if possible
290 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
291 // Do not give up existing stack until we have managed to
292 // get the double capacity that we desired.
293 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
294 sizeof(oop)));
295 if (rs.is_reserved()) {
296 // Release the backing store associated with old stack
297 _virtual_space.release();
298 // Reinitialize virtual space for new stack
299 if (!_virtual_space.initialize(rs, rs.size())) {
300 fatal("Not enough swap for expanded marking stack capacity");
301 }
302 _base = (oop*)(_virtual_space.low());
303 _index = 0;
304 _capacity = new_capacity;
305 } else {
306 if (PrintGCDetails && Verbose) {
307 // Failed to double capacity, continue;
308 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
309 SIZE_FORMAT"K to " SIZE_FORMAT"K",
310 _capacity / K, new_capacity / K);
311 }
312 }
313 }
314
315 void CMMarkStack::set_should_expand() {
316 // If we're resetting the marking state because of an
317 // marking stack overflow, record that we should, if
318 // possible, expand the stack.
319 _should_expand = _cm->has_overflown();
320 }
321
322 CMMarkStack::~CMMarkStack() {
323 if (_base != NULL) {
324 _base = NULL;
325 _virtual_space.release();
326 }
327 }
328
329 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
537 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
538 _cleanup_times(),
539 _total_counting_time(0.0),
540 _total_rs_scrub_time(0.0),
541
542 _parallel_workers(NULL),
543
544 _count_card_bitmaps(NULL),
545 _count_marked_bytes(NULL),
546 _completed_initialization(false) {
547 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
548 if (verbose_level < no_verbose) {
549 verbose_level = no_verbose;
550 }
551 if (verbose_level > high_verbose) {
552 verbose_level = high_verbose;
553 }
554 _verbose_level = verbose_level;
555
556 if (verbose_low()) {
557 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
558 "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
559 }
560
561 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
562 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
563
564 // Create & start a ConcurrentMark thread.
565 _cmThread = new ConcurrentMarkThread(this);
566 assert(cmThread() != NULL, "CM Thread should have been created");
567 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
568 if (_cmThread->osthread() == NULL) {
569 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
570 }
571
572 assert(CGC_lock != NULL, "Where's the CGC_lock?");
573 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
574 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
575
576 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
577 satb_qs.set_buffer_size(G1SATBBufferSize);
784 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
785 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
786 }
787
788 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
789 set_concurrency(active_tasks);
790
791 _concurrent = concurrent;
792 // We propagate this to all tasks, not just the active ones.
793 for (uint i = 0; i < _max_worker_id; ++i)
794 _tasks[i]->set_concurrent(concurrent);
795
796 if (concurrent) {
797 set_concurrent_marking_in_progress();
798 } else {
799 // We currently assume that the concurrent flag has been set to
800 // false before we start remark. At this point we should also be
801 // in a STW phase.
802 assert(!concurrent_marking_in_progress(), "invariant");
803 assert(out_of_regions(),
804 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
805 p2i(_finger), p2i(_heap_end)));
806 }
807 }
808
void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  // Discard any leftover per-cycle marking state (see reset_marking_state()).
  reset_marking_state();
  // No marking tasks are active outside of a marking cycle.
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}
816
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  // Aborting here is deliberate: it guards against accidental deletion.
  ShouldNotReachHere();
}
821
822 void ConcurrentMark::clearNextBitmap() {
823 G1CollectedHeap* g1h = G1CollectedHeap::heap();
824
1396 _bm(bm), _region_marked_bytes(0) { }
1397
1398 bool doHeapRegion(HeapRegion* hr) {
1399
1400 if (hr->is_continues_humongous()) {
1401 // We will ignore these here and process them when their
1402 // associated "starts humongous" region is processed (see
1403 // set_bit_for_heap_region()). Note that we cannot rely on their
1404 // associated "starts humongous" region to have their bit set to
1405 // 1 since, due to the region chunking in the parallel region
1406 // iteration, a "continues humongous" region might be visited
1407 // before its associated "starts humongous".
1408 return false;
1409 }
1410
1411 HeapWord* ntams = hr->next_top_at_mark_start();
1412 HeapWord* start = hr->bottom();
1413
1414 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1415 err_msg("Preconditions not met - "
1416 "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1417 p2i(start), p2i(ntams), p2i(hr->end())));
1418
1419 // Find the first marked object at or after "start".
1420 start = _bm->getNextMarkedWordAddress(start, ntams);
1421
1422 size_t marked_bytes = 0;
1423
1424 while (start < ntams) {
1425 oop obj = oop(start);
1426 int obj_sz = obj->size();
1427 HeapWord* obj_end = start + obj_sz;
1428
1429 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1430 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1431
1432 // Note: if we're looking at the last region in heap - obj_end
1433 // could be actually just beyond the end of the heap; end_idx
1434 // will then correspond to a (non-existent) card that is also
1435 // just beyond the heap.
1436 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1697 // Mark the allocated-since-marking portion...
1698 if (ntams < top) {
1699 // This definitely means the region has live objects.
1700 set_bit_for_region(hr);
1701
1702 // Now set the bits in the card bitmap for [ntams, top)
1703 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1704 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1705
1706 // Note: if we're looking at the last region in heap - top
1707 // could be actually just beyond the end of the heap; end_idx
1708 // will then correspond to a (non-existent) card that is also
1709 // just beyond the heap.
1710 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1711 // end of object is not card aligned - increment to cover
1712 // all the cards spanned by the object
1713 end_idx += 1;
1714 }
1715
1716 assert(end_idx <= _card_bm->size(),
1717 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1718 end_idx, _card_bm->size()));
1719 assert(start_idx < _card_bm->size(),
1720 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1721 start_idx, _card_bm->size()));
1722
1723 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1724 }
1725
1726 // Set the bit for the region if it contains live data
1727 if (hr->next_marked_bytes() > 0) {
1728 set_bit_for_region(hr);
1729 }
1730
1731 return false;
1732 }
1733 };
1734
1735 class G1ParFinalCountTask: public AbstractGangTask {
1736 protected:
1737 G1CollectedHeap* _g1h;
1738 ConcurrentMark* _cm;
1739 BitMap* _actual_region_bm;
1740 BitMap* _actual_card_bm;
2145 int _ref_counter_limit;
2146 int _ref_counter;
2147 bool _is_serial;
2148 public:
  // Closure that keeps referents alive by handing each one to the CM
  // task, periodically draining the task's local queue (see
  // do_oop_work() below).
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    // In the serial case only worker 0 is allowed to run this closure.
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    // Counts down from the limit; drained/reset when it reaches 0.
    _ref_counter = _ref_counter_limit;
  }
2156
  // Both oop widths funnel into the common do_oop_work() below.
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
2159
2160 template <class T> void do_oop_work(T* p) {
2161 if (!_cm->has_overflown()) {
2162 oop obj = oopDesc::load_decode_heap_oop(p);
2163 if (_cm->verbose_high()) {
2164 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2165 "*"PTR_FORMAT" = "PTR_FORMAT,
2166 _task->worker_id(), p2i(p), p2i((void*) obj));
2167 }
2168
2169 _task->deal_with_reference(obj);
2170 _ref_counter--;
2171
2172 if (_ref_counter == 0) {
2173 // We have dealt with _ref_counter_limit references, pushing them
2174 // and objects reachable from them on to the local stack (and
2175 // possibly the global stack). Call CMTask::do_marking_step() to
2176 // process these entries.
2177 //
2178 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2179 // there's nothing more to do (i.e. we're done with the entries that
2180 // were pushed as a result of the CMTask::deal_with_reference() calls
2181 // above) or we overflow.
2182 //
2183 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2184 // flag while there may still be some work to do. (See the comment at
2185 // the beginning of CMTask::do_marking_step() for those conditions -
2680 // it will skip the subsequent CH regions).
2681 // If it comes across a region that suddenly becomes CH, the
2682 // scenario will be similar to b). So, the race between
2683 // claim_region() and a humongous object allocation might force us
2684 // to do a bit of unnecessary work (due to some unnecessary bitmap
2685 // iterations) but it should not introduce and correctness issues.
2686 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2687
2688 // Above heap_region_containing_raw may return NULL as we always scan claim
2689 // until the end of the heap. In this case, just jump to the next region.
2690 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2691
2692 // Is the gap between reading the finger and doing the CAS too long?
2693 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2694 if (res == finger && curr_region != NULL) {
2695 // we succeeded
2696 HeapWord* bottom = curr_region->bottom();
2697 HeapWord* limit = curr_region->next_top_at_mark_start();
2698
2699 if (verbose_low()) {
2700 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2701 "["PTR_FORMAT", "PTR_FORMAT"), "
2702 "limit = "PTR_FORMAT,
2703 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2704 }
2705
2706 // notice that _finger == end cannot be guaranteed here since,
2707 // someone else might have moved the finger even further
2708 assert(_finger >= end, "the finger should have moved forward");
2709
2710 if (verbose_low()) {
2711 gclog_or_tty->print_cr("[%u] we were successful with region = "
2712 PTR_FORMAT, worker_id, p2i(curr_region));
2713 }
2714
2715 if (limit > bottom) {
2716 if (verbose_low()) {
2717 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2718 "returning it ", worker_id, p2i(curr_region));
2719 }
2720 return curr_region;
2721 } else {
2722 assert(limit == bottom,
2723 "the region limit should be at bottom");
2724 if (verbose_low()) {
2725 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2726 "returning NULL", worker_id, p2i(curr_region));
2727 }
2728 // we return NULL and the caller should try calling
2729 // claim_region() again.
2730 return NULL;
2731 }
2732 } else {
2733 assert(_finger > finger, "the finger should have moved forward");
2734 if (verbose_low()) {
2735 if (curr_region == NULL) {
2736 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2737 "global finger = "PTR_FORMAT", "
2738 "our finger = "PTR_FORMAT,
2739 worker_id, p2i(_finger), p2i(finger));
2740 } else {
2741 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2742 "global finger = "PTR_FORMAT", "
2743 "our finger = "PTR_FORMAT,
2744 worker_id, p2i(_finger), p2i(finger));
2745 }
2746 }
2747
2748 // read it again
2749 finger = _finger;
2750 }
2751 }
2752
2753 return NULL;
2754 }
2755
2756 #ifndef PRODUCT
// Which data structure is currently being verified; selects the label
// ("Stack"/"Queue") used in verification failure messages.
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,   // verifying entries on the global mark stack
  VerifyNoCSetOopsQueues   // verifying entries on the per-task queues
};
2761
2762 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2763 private:
2764 G1CollectedHeap* _g1h;
2765 VerifyNoCSetOopsPhase _phase;
2766 int _info;
2767
2768 const char* phase_str() {
2769 switch (_phase) {
2770 case VerifyNoCSetOopsStack: return "Stack";
2771 case VerifyNoCSetOopsQueues: return "Queue";
2772 default: ShouldNotReachHere();
2773 }
2774 return NULL;
2775 }
2776
  // Common check shared by both oop widths: no object reachable from
  // the marking data structures may be in the collection set.
  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }
2782
2783 public:
  // _phase/_info are set via set_phase() before each round of verification.
  VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2785
  // Selects which data structure is being verified; info (e.g. a queue
  // index) is reported in failure messages, -1 when not applicable.
  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
    _phase = phase;
    _info = info;
  }
2790
2791 virtual void do_oop(oop* p) {
2792 oop obj = oopDesc::load_decode_heap_oop(p);
2793 do_object_work(obj);
2794 }
2795
2796 virtual void do_oop(narrowOop* p) {
2797 // We should not come across narrow oops while scanning marking
2798 // stacks
2799 ShouldNotReachHere();
2820 for (uint i = 0; i < _max_worker_id; i += 1) {
2821 cl.set_phase(VerifyNoCSetOopsQueues, i);
2822 CMTaskQueue* queue = _task_queues->queue(i);
2823 queue->oops_do(&cl);
2824 }
2825
2826 // Verify the global finger
2827 HeapWord* global_finger = finger();
2828 if (global_finger != NULL && global_finger < _heap_end) {
2829 // The global finger always points to a heap region boundary. We
2830 // use heap_region_containing_raw() to get the containing region
2831 // given that the global finger could be pointing to a free region
2832 // which subsequently becomes continues humongous. If that
2833 // happens, heap_region_containing() will return the bottom of the
2834 // corresponding starts humongous region and the check below will
2835 // not hold any more.
2836 // Since we always iterate over all regions, we might get a NULL HeapRegion
2837 // here.
2838 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2839 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2840 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2841 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2842 }
2843
2844 // Verify the task fingers
2845 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2846 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2847 CMTask* task = _tasks[i];
2848 HeapWord* task_finger = task->finger();
2849 if (task_finger != NULL && task_finger < _heap_end) {
2850 // See above note on the global finger verification.
2851 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2852 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2853 !task_hr->in_collection_set(),
2854 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2855 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2856 }
2857 }
2858 }
2859 #endif // PRODUCT
2860
2861 // Aggregate the counting data that was constructed concurrently
2862 // with marking.
2863 class AggregateCountDataHRClosure: public HeapRegionClosure {
2864 G1CollectedHeap* _g1h;
2865 ConcurrentMark* _cm;
2866 CardTableModRefBS* _ct_bs;
2867 BitMap* _cm_card_bm;
2868 uint _max_worker_id;
2869
2870 public:
2871 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2872 BitMap* cm_card_bm,
2873 uint max_worker_id) :
2874 _g1h(g1h), _cm(g1h->concurrent_mark()),
2876 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2877
2878 bool doHeapRegion(HeapRegion* hr) {
2879 if (hr->is_continues_humongous()) {
2880 // We will ignore these here and process them when their
2881 // associated "starts humongous" region is processed.
2882 // Note that we cannot rely on their associated
2883 // "starts humongous" region to have their bit set to 1
2884 // since, due to the region chunking in the parallel region
2885 // iteration, a "continues humongous" region might be visited
2886 // before its associated "starts humongous".
2887 return false;
2888 }
2889
2890 HeapWord* start = hr->bottom();
2891 HeapWord* limit = hr->next_top_at_mark_start();
2892 HeapWord* end = hr->end();
2893
2894 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2895 err_msg("Preconditions not met - "
2896 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
2897 "top: "PTR_FORMAT", end: "PTR_FORMAT,
2898 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2899
2900 assert(hr->next_marked_bytes() == 0, "Precondition");
2901
2902 if (start == limit) {
2903 // NTAMS of this region has not been set so nothing to do.
2904 return false;
2905 }
2906
2907 // 'start' should be in the heap.
2908 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2909 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2910 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2911
2912 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2913 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2914 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2915
2916 // If ntams is not card aligned then we bump card bitmap index
2917 // for limit so that we get the all the cards spanned by
3139 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3140 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3141 }
3142
3143 // We take a break if someone is trying to stop the world.
3144 bool ConcurrentMark::do_yield_check(uint worker_id) {
3145 if (SuspendibleThreadSet::should_yield()) {
3146 if (worker_id == 0) {
3147 _g1h->g1_policy()->record_concurrent_pause();
3148 }
3149 SuspendibleThreadSet::yield();
3150 return true;
3151 } else {
3152 return false;
3153 }
3154 }
3155
3156 #ifndef PRODUCT
3157 // for debugging purposes
3158 void ConcurrentMark::print_finger() {
3159 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3160 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3161 for (uint i = 0; i < _max_worker_id; ++i) {
3162 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3163 }
3164 gclog_or_tty->cr();
3165 }
3166 #endif
3167
3168 template<bool scan>
3169 inline void CMTask::process_grey_object(oop obj) {
3170 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3171 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3172
3173 if (_cm->verbose_high()) {
3174 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3175 _worker_id, p2i((void*) obj));
3176 }
3177
3178 size_t obj_size = obj->size();
3179 _words_scanned += obj_size;
3224
3225 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3226 ConcurrentMark* cm,
3227 CMTask* task)
3228 : _g1h(g1h), _cm(cm), _task(task) {
3229 assert(_ref_processor == NULL, "should be initialized to NULL");
3230
3231 if (G1UseConcMarkReferenceProcessing) {
3232 _ref_processor = g1h->ref_processor_cm();
3233 assert(_ref_processor != NULL, "should not be NULL");
3234 }
3235 }
3236
3237 void CMTask::setup_for_region(HeapRegion* hr) {
3238 assert(hr != NULL,
3239 "claim_region() should have filtered out NULL regions");
3240 assert(!hr->is_continues_humongous(),
3241 "claim_region() should have filtered out continues humongous regions");
3242
3243 if (_cm->verbose_low()) {
3244 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3245 _worker_id, p2i(hr));
3246 }
3247
3248 _curr_region = hr;
3249 _finger = hr->bottom();
3250 update_region_limit();
3251 }
3252
// Re-reads the current region's NTAMS and refreshes _region_limit
// (and, in the special cases below, _finger). Called when a region is
// first set up and again when a task restarts after an evacuation
// pause may have changed the region underneath it.
void CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "["PTR_FORMAT", "PTR_FORMAT")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    // NTAMS has not receded since we last looked, so the current
    // _finger is still within the scannable part of the region.
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and we do not need in fact to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}
3289
// Abandon the region currently being scanned.
void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  // Drop _curr_region/_finger/_region_limit so the main marking loop
  // will go and claim a fresh region.
  clear_region_fields();
}
3298
void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region. setup_for_region() re-populates them when
  // a new region is claimed.
  _curr_region = NULL;
  _finger = NULL;
  _region_limit = NULL;
}
3306
3307 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3308 if (cm_oop_closure == NULL) {
3309 assert(_cm_oop_closure != NULL, "invariant");
3310 } else {
3311 assert(_cm_oop_closure == NULL, "invariant");
3312 }
3313 _cm_oop_closure = cm_oop_closure;
3395 return;
3396 }
3397
3398 double curr_time_ms = os::elapsedVTime() * 1000.0;
3399
3400 // (3) If marking stats are enabled, then we update the step history.
3401 #if _MARKING_STATS_
3402 if (_words_scanned >= _words_scanned_limit) {
3403 ++_clock_due_to_scanning;
3404 }
3405 if (_refs_reached >= _refs_reached_limit) {
3406 ++_clock_due_to_marking;
3407 }
3408
3409 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3410 _interval_start_time_ms = curr_time_ms;
3411 _all_clock_intervals_ms.add(last_interval_ms);
3412
3413 if (_cm->verbose_medium()) {
3414 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3415 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3416 _worker_id, last_interval_ms,
3417 _words_scanned,
3418 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3419 _refs_reached,
3420 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3421 }
3422 #endif // _MARKING_STATS_
3423
3424 // (4) We check whether we should yield. If we have to, then we abort.
3425 if (SuspendibleThreadSet::should_yield()) {
3426 // We should yield. To do this we abort the task. The caller is
3427 // responsible for yielding.
3428 set_has_aborted();
3429 statsOnly( ++_aborted_yield );
3430 return;
3431 }
3432
3433 // (5) We check whether we've reached our time quota. If we have,
3434 // then we abort.
3435 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3564 // of things to do) or totally (at the very end).
3565 size_t target_size;
3566 if (partially) {
3567 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3568 } else {
3569 target_size = 0;
3570 }
3571
3572 if (_task_queue->size() > target_size) {
3573 if (_cm->verbose_high()) {
3574 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3575 _worker_id, target_size);
3576 }
3577
3578 oop obj;
3579 bool ret = _task_queue->pop_local(obj);
3580 while (ret) {
3581 statsOnly( ++_local_pops );
3582
3583 if (_cm->verbose_high()) {
3584 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3585 p2i((void*) obj));
3586 }
3587
3588 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3589 assert(!_g1h->is_on_master_free_list(
3590 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3591
3592 scan_object(obj);
3593
3594 if (_task_queue->size() <= target_size || has_aborted()) {
3595 ret = false;
3596 } else {
3597 ret = _task_queue->pop_local(obj);
3598 }
3599 }
3600
3601 if (_cm->verbose_high()) {
3602 gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3603 _worker_id, _task_queue->size());
3604 }
3921 // This means that we're already holding on to a region.
3922 assert(_finger != NULL, "if region is not NULL, then the finger "
3923 "should not be NULL either");
3924
3925 // We might have restarted this task after an evacuation pause
3926 // which might have evacuated the region we're holding on to
3927 // underneath our feet. Let's read its limit again to make sure
3928 // that we do not iterate over a region of the heap that
3929 // contains garbage (update_region_limit() will also move
3930 // _finger to the start of the region if it is found empty).
3931 update_region_limit();
3932 // We will start from _finger not from the start of the region,
3933 // as we might be restarting this task after aborting half-way
3934 // through scanning this region. In this case, _finger points to
3935 // the address where we last found a marked object. If this is a
3936 // fresh region, _finger points to start().
3937 MemRegion mr = MemRegion(_finger, _region_limit);
3938
3939 if (_cm->verbose_low()) {
3940 gclog_or_tty->print_cr("[%u] we're scanning part "
3941 "["PTR_FORMAT", "PTR_FORMAT") "
3942 "of region "HR_FORMAT,
3943 _worker_id, p2i(_finger), p2i(_region_limit),
3944 HR_FORMAT_PARAMS(_curr_region));
3945 }
3946
3947 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
3948 "humongous regions should go around loop once only");
3949
3950 // Some special cases:
3951 // If the memory region is empty, we can just give up the region.
3952 // If the current region is humongous then we only need to check
3953 // the bitmap for the bit associated with the start of the object,
3954 // scan the object if it's live, and give up the region.
3955 // Otherwise, let's iterate over the bitmap of the part of the region
3956 // that is left.
3957 // If the iteration is successful, give up the region.
3958 if (mr.is_empty()) {
3959 giveup_current_region();
3960 regular_clock_call();
3961 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
3962 if (_nextMarkBitMap->isMarked(mr.start())) {
4009 // return NULL with potentially more regions available for
4010 // claiming and why we have to check out_of_regions() to determine
4011 // whether we're done or not.
4012 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4013 // We are going to try to claim a new region. We should have
4014 // given up on the previous one.
4015 // Separated the asserts so that we know which one fires.
4016 assert(_curr_region == NULL, "invariant");
4017 assert(_finger == NULL, "invariant");
4018 assert(_region_limit == NULL, "invariant");
4019 if (_cm->verbose_low()) {
4020 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4021 }
4022 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4023 if (claimed_region != NULL) {
4024 // Yes, we managed to claim one
4025 statsOnly( ++_regions_claimed );
4026
4027 if (_cm->verbose_low()) {
4028 gclog_or_tty->print_cr("[%u] we successfully claimed "
4029 "region "PTR_FORMAT,
4030 _worker_id, p2i(claimed_region));
4031 }
4032
4033 setup_for_region(claimed_region);
4034 assert(_curr_region == claimed_region, "invariant");
4035 }
4036 // It is important to call the regular clock here. It might take
4037 // a while to claim a region if, for example, we hit a large
4038 // block of empty regions. So we need to call the regular clock
4039 // method once round the loop to make sure it's called
4040 // frequently enough.
4041 regular_clock_call();
4042 }
4043
4044 if (!has_aborted() && _curr_region == NULL) {
4045 assert(_cm->out_of_regions(),
4046 "at this point we should be out of regions");
4047 }
4048 } while ( _curr_region != NULL && !has_aborted());
4049
4070 // Attempt at work stealing from other task's queues.
4071 if (do_stealing && !has_aborted()) {
4072 // We have not aborted. This means that we have finished all that
4073 // we could. Let's try to do some stealing...
4074
4075 // We cannot check whether the global stack is empty, since other
4076 // tasks might be pushing objects to it concurrently.
4077 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4078 "only way to reach here");
4079
4080 if (_cm->verbose_low()) {
4081 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4082 }
4083
4084 while (!has_aborted()) {
4085 oop obj;
4086 statsOnly( ++_steal_attempts );
4087
4088 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4089 if (_cm->verbose_medium()) {
4090 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4091 _worker_id, p2i((void*) obj));
4092 }
4093
4094 statsOnly( ++_steals );
4095
4096 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4097 "any stolen object should be marked");
4098 scan_object(obj);
4099
4100 // And since we're towards the end, let's totally drain the
4101 // local queue and global stack.
4102 drain_local_queue(false);
4103 drain_global_stack(false);
4104 } else {
4105 break;
4106 }
4107 }
4108 }
4109
4110 // If we are about to wrap up and go into termination, check if we
4278 guarantee(task_queue != NULL, "invariant");
4279 guarantee(task_queues != NULL, "invariant");
4280
4281 statsOnly( _clock_due_to_scanning = 0;
4282 _clock_due_to_marking = 0 );
4283
4284 _marking_step_diffs_ms.add(0.5);
4285 }
4286
4287 // These are formatting macros that are used below to ensure
4288 // consistent formatting. The *_H_* versions are used to format the
4289 // header for a particular value and they should be kept consistent
4290 // with the corresponding macro. Also note that most of the macros add
4291 // the necessary white space (as a prefix) which makes them a bit
4292 // easier to compose.
4293
4294 // All the output lines are prefixed with this string to be able to
4295 // identify them easily in a large log file.
4296 #define G1PPRL_LINE_PREFIX "###"
4297
4298 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4299 #ifdef _LP64
4300 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4301 #else // _LP64
4302 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4303 #endif // _LP64
4304
4305 // For per-region info
4306 #define G1PPRL_TYPE_FORMAT " %-4s"
4307 #define G1PPRL_TYPE_H_FORMAT " %4s"
4308 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4309 #define G1PPRL_BYTE_H_FORMAT " %9s"
4310 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4311 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4312
4313 // For summary info
4314 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4315 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4316 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4317 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4318
4319 G1PrintRegionLivenessInfoClosure::
4320 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4321 : _out(out),
4322 _total_used_bytes(0), _total_capacity_bytes(0),
4323 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4324 _hum_used_bytes(0), _hum_capacity_bytes(0),
4325 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4326 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4327 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4328 MemRegion g1_reserved = g1h->g1_reserved();
4329 double now = os::elapsedTime();
4330
4331 // Print the header of the output.
4332 _out->cr();
4333 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4334 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4335 G1PPRL_SUM_ADDR_FORMAT("reserved")
4336 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4337 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
|
289 // Double capacity if possible
290 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
291 // Do not give up existing stack until we have managed to
292 // get the double capacity that we desired.
293 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
294 sizeof(oop)));
295 if (rs.is_reserved()) {
296 // Release the backing store associated with old stack
297 _virtual_space.release();
298 // Reinitialize virtual space for new stack
299 if (!_virtual_space.initialize(rs, rs.size())) {
300 fatal("Not enough swap for expanded marking stack capacity");
301 }
302 _base = (oop*)(_virtual_space.low());
303 _index = 0;
304 _capacity = new_capacity;
305 } else {
306 if (PrintGCDetails && Verbose) {
307 // Failed to double capacity, continue;
308 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
309 SIZE_FORMAT "K to " SIZE_FORMAT "K",
310 _capacity / K, new_capacity / K);
311 }
312 }
313 }
314
// Record whether the stack should be expanded at the next safe
// opportunity.
void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a marking
  // stack overflow, record that we should, if possible, expand
  // the stack.
  _should_expand = _cm->has_overflown();
}
321
322 CMMarkStack::~CMMarkStack() {
323 if (_base != NULL) {
324 _base = NULL;
325 _virtual_space.release();
326 }
327 }
328
329 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
537 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
538 _cleanup_times(),
539 _total_counting_time(0.0),
540 _total_rs_scrub_time(0.0),
541
542 _parallel_workers(NULL),
543
544 _count_card_bitmaps(NULL),
545 _count_marked_bytes(NULL),
546 _completed_initialization(false) {
547 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
548 if (verbose_level < no_verbose) {
549 verbose_level = no_verbose;
550 }
551 if (verbose_level > high_verbose) {
552 verbose_level = high_verbose;
553 }
554 _verbose_level = verbose_level;
555
556 if (verbose_low()) {
557 gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
558 "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
559 }
560
561 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
562 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
563
564 // Create & start a ConcurrentMark thread.
565 _cmThread = new ConcurrentMarkThread(this);
566 assert(cmThread() != NULL, "CM Thread should have been created");
567 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
568 if (_cmThread->osthread() == NULL) {
569 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
570 }
571
572 assert(CGC_lock != NULL, "Where's the CGC_lock?");
573 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
574 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
575
576 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
577 satb_qs.set_buffer_size(G1SATBBufferSize);
784 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
785 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
786 }
787
788 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
789 set_concurrency(active_tasks);
790
791 _concurrent = concurrent;
792 // We propagate this to all tasks, not just the active ones.
793 for (uint i = 0; i < _max_worker_id; ++i)
794 _tasks[i]->set_concurrent(concurrent);
795
796 if (concurrent) {
797 set_concurrent_marking_in_progress();
798 } else {
799 // We currently assume that the concurrent flag has been set to
800 // false before we start remark. At this point we should also be
801 // in a STW phase.
802 assert(!concurrent_marking_in_progress(), "invariant");
803 assert(out_of_regions(),
804 err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
805 p2i(_finger), p2i(_heap_end)));
806 }
807 }
808
// Return the global marking state to its default (idle) values.
void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;  // no tasks are active outside of a marking cycle
  clear_concurrent_marking_in_progress();
}
816
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed, so reaching this
  // destructor indicates a bug in the caller.
  ShouldNotReachHere();
}
821
822 void ConcurrentMark::clearNextBitmap() {
823 G1CollectedHeap* g1h = G1CollectedHeap::heap();
824
1396 _bm(bm), _region_marked_bytes(0) { }
1397
1398 bool doHeapRegion(HeapRegion* hr) {
1399
1400 if (hr->is_continues_humongous()) {
1401 // We will ignore these here and process them when their
1402 // associated "starts humongous" region is processed (see
1403 // set_bit_for_heap_region()). Note that we cannot rely on their
1404 // associated "starts humongous" region to have their bit set to
1405 // 1 since, due to the region chunking in the parallel region
1406 // iteration, a "continues humongous" region might be visited
1407 // before its associated "starts humongous".
1408 return false;
1409 }
1410
1411 HeapWord* ntams = hr->next_top_at_mark_start();
1412 HeapWord* start = hr->bottom();
1413
1414 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1415 err_msg("Preconditions not met - "
1416 "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1417 p2i(start), p2i(ntams), p2i(hr->end())));
1418
1419 // Find the first marked object at or after "start".
1420 start = _bm->getNextMarkedWordAddress(start, ntams);
1421
1422 size_t marked_bytes = 0;
1423
1424 while (start < ntams) {
1425 oop obj = oop(start);
1426 int obj_sz = obj->size();
1427 HeapWord* obj_end = start + obj_sz;
1428
1429 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1430 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1431
1432 // Note: if we're looking at the last region in heap - obj_end
1433 // could be actually just beyond the end of the heap; end_idx
1434 // will then correspond to a (non-existent) card that is also
1435 // just beyond the heap.
1436 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1697 // Mark the allocated-since-marking portion...
1698 if (ntams < top) {
1699 // This definitely means the region has live objects.
1700 set_bit_for_region(hr);
1701
1702 // Now set the bits in the card bitmap for [ntams, top)
1703 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1704 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1705
1706 // Note: if we're looking at the last region in heap - top
1707 // could be actually just beyond the end of the heap; end_idx
1708 // will then correspond to a (non-existent) card that is also
1709 // just beyond the heap.
1710 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1711 // end of object is not card aligned - increment to cover
1712 // all the cards spanned by the object
1713 end_idx += 1;
1714 }
1715
1716 assert(end_idx <= _card_bm->size(),
1717 err_msg("oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1718 end_idx, _card_bm->size()));
1719 assert(start_idx < _card_bm->size(),
1720 err_msg("oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1721 start_idx, _card_bm->size()));
1722
1723 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1724 }
1725
1726 // Set the bit for the region if it contains live data
1727 if (hr->next_marked_bytes() > 0) {
1728 set_bit_for_region(hr);
1729 }
1730
1731 return false;
1732 }
1733 };
1734
1735 class G1ParFinalCountTask: public AbstractGangTask {
1736 protected:
1737 G1CollectedHeap* _g1h;
1738 ConcurrentMark* _cm;
1739 BitMap* _actual_region_bm;
1740 BitMap* _actual_card_bm;
2145 int _ref_counter_limit;
2146 int _ref_counter;
2147 bool _is_serial;
2148 public:
  // cm/task give the marking context; is_serial selects the
  // single-threaded reference-processing path, which may only run on
  // worker 0.
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    // Start the drain countdown at the limit; do_oop_work() decrements
    // it and drains when it hits zero.
    _ref_counter = _ref_counter_limit;
  }
2156
  // Both the narrow and the full-width oop variants funnel into the
  // templated worker below.
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
2159
2160 template <class T> void do_oop_work(T* p) {
2161 if (!_cm->has_overflown()) {
2162 oop obj = oopDesc::load_decode_heap_oop(p);
2163 if (_cm->verbose_high()) {
2164 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2165 "*" PTR_FORMAT " = " PTR_FORMAT,
2166 _task->worker_id(), p2i(p), p2i((void*) obj));
2167 }
2168
2169 _task->deal_with_reference(obj);
2170 _ref_counter--;
2171
2172 if (_ref_counter == 0) {
2173 // We have dealt with _ref_counter_limit references, pushing them
2174 // and objects reachable from them on to the local stack (and
2175 // possibly the global stack). Call CMTask::do_marking_step() to
2176 // process these entries.
2177 //
2178 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2179 // there's nothing more to do (i.e. we're done with the entries that
2180 // were pushed as a result of the CMTask::deal_with_reference() calls
2181 // above) or we overflow.
2182 //
2183 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2184 // flag while there may still be some work to do. (See the comment at
2185 // the beginning of CMTask::do_marking_step() for those conditions -
2680 // it will skip the subsequent CH regions).
2681 // If it comes across a region that suddenly becomes CH, the
2682 // scenario will be similar to b). So, the race between
2683 // claim_region() and a humongous object allocation might force us
2684 // to do a bit of unnecessary work (due to some unnecessary bitmap
2685 // iterations) but it should not introduce and correctness issues.
2686 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2687
2688 // Above heap_region_containing_raw may return NULL as we always scan claim
2689 // until the end of the heap. In this case, just jump to the next region.
2690 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2691
2692 // Is the gap between reading the finger and doing the CAS too long?
2693 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2694 if (res == finger && curr_region != NULL) {
2695 // we succeeded
2696 HeapWord* bottom = curr_region->bottom();
2697 HeapWord* limit = curr_region->next_top_at_mark_start();
2698
2699 if (verbose_low()) {
2700 gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2701 "[" PTR_FORMAT ", " PTR_FORMAT "), "
2702 "limit = " PTR_FORMAT,
2703 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2704 }
2705
2706 // notice that _finger == end cannot be guaranteed here since,
2707 // someone else might have moved the finger even further
2708 assert(_finger >= end, "the finger should have moved forward");
2709
2710 if (verbose_low()) {
2711 gclog_or_tty->print_cr("[%u] we were successful with region = "
2712 PTR_FORMAT, worker_id, p2i(curr_region));
2713 }
2714
2715 if (limit > bottom) {
2716 if (verbose_low()) {
2717 gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
2718 "returning it ", worker_id, p2i(curr_region));
2719 }
2720 return curr_region;
2721 } else {
2722 assert(limit == bottom,
2723 "the region limit should be at bottom");
2724 if (verbose_low()) {
2725 gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
2726 "returning NULL", worker_id, p2i(curr_region));
2727 }
2728 // we return NULL and the caller should try calling
2729 // claim_region() again.
2730 return NULL;
2731 }
2732 } else {
2733 assert(_finger > finger, "the finger should have moved forward");
2734 if (verbose_low()) {
2735 if (curr_region == NULL) {
2736 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2737 "global finger = " PTR_FORMAT ", "
2738 "our finger = " PTR_FORMAT,
2739 worker_id, p2i(_finger), p2i(finger));
2740 } else {
2741 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2742 "global finger = " PTR_FORMAT ", "
2743 "our finger = " PTR_FORMAT,
2744 worker_id, p2i(_finger), p2i(finger));
2745 }
2746 }
2747
2748 // read it again
2749 finger = _finger;
2750 }
2751 }
2752
2753 return NULL;
2754 }
2755
2756 #ifndef PRODUCT
// Identifies, for failure messages, which data structure is being
// verified by VerifyNoCSetOopsClosure: the global mark stack or the
// per-task queues.
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,
  VerifyNoCSetOopsQueues
};
2761
2762 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2763 private:
2764 G1CollectedHeap* _g1h;
2765 VerifyNoCSetOopsPhase _phase;
2766 int _info;
2767
2768 const char* phase_str() {
2769 switch (_phase) {
2770 case VerifyNoCSetOopsStack: return "Stack";
2771 case VerifyNoCSetOopsQueues: return "Queue";
2772 default: ShouldNotReachHere();
2773 }
2774 return NULL;
2775 }
2776
  // Fails (via guarantee) if obj lies in the collection set; the
  // phase name and info index identify where the bad oop came from.
  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }
2782
2783 public:
  // _phase/_info are left unset here; callers invoke set_phase()
  // before applying the closure.
  VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2785
2786 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2787 _phase = phase;
2788 _info = info;
2789 }
2790
2791 virtual void do_oop(oop* p) {
2792 oop obj = oopDesc::load_decode_heap_oop(p);
2793 do_object_work(obj);
2794 }
2795
2796 virtual void do_oop(narrowOop* p) {
2797 // We should not come across narrow oops while scanning marking
2798 // stacks
2799 ShouldNotReachHere();
2820 for (uint i = 0; i < _max_worker_id; i += 1) {
2821 cl.set_phase(VerifyNoCSetOopsQueues, i);
2822 CMTaskQueue* queue = _task_queues->queue(i);
2823 queue->oops_do(&cl);
2824 }
2825
2826 // Verify the global finger
2827 HeapWord* global_finger = finger();
2828 if (global_finger != NULL && global_finger < _heap_end) {
2829 // The global finger always points to a heap region boundary. We
2830 // use heap_region_containing_raw() to get the containing region
2831 // given that the global finger could be pointing to a free region
2832 // which subsequently becomes continues humongous. If that
2833 // happens, heap_region_containing() will return the bottom of the
2834 // corresponding starts humongous region and the check below will
2835 // not hold any more.
2836 // Since we always iterate over all regions, we might get a NULL HeapRegion
2837 // here.
2838 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2839 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2840 err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
2841 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2842 }
2843
2844 // Verify the task fingers
2845 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2846 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2847 CMTask* task = _tasks[i];
2848 HeapWord* task_finger = task->finger();
2849 if (task_finger != NULL && task_finger < _heap_end) {
2850 // See above note on the global finger verification.
2851 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2852 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2853 !task_hr->in_collection_set(),
2854 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
2855 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2856 }
2857 }
2858 }
2859 #endif // PRODUCT
2860
2861 // Aggregate the counting data that was constructed concurrently
2862 // with marking.
2863 class AggregateCountDataHRClosure: public HeapRegionClosure {
2864 G1CollectedHeap* _g1h;
2865 ConcurrentMark* _cm;
2866 CardTableModRefBS* _ct_bs;
2867 BitMap* _cm_card_bm;
2868 uint _max_worker_id;
2869
2870 public:
2871 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2872 BitMap* cm_card_bm,
2873 uint max_worker_id) :
2874 _g1h(g1h), _cm(g1h->concurrent_mark()),
2876 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2877
2878 bool doHeapRegion(HeapRegion* hr) {
2879 if (hr->is_continues_humongous()) {
2880 // We will ignore these here and process them when their
2881 // associated "starts humongous" region is processed.
2882 // Note that we cannot rely on their associated
2883 // "starts humongous" region to have their bit set to 1
2884 // since, due to the region chunking in the parallel region
2885 // iteration, a "continues humongous" region might be visited
2886 // before its associated "starts humongous".
2887 return false;
2888 }
2889
2890 HeapWord* start = hr->bottom();
2891 HeapWord* limit = hr->next_top_at_mark_start();
2892 HeapWord* end = hr->end();
2893
2894 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2895 err_msg("Preconditions not met - "
2896 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2897 "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2898 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2899
2900 assert(hr->next_marked_bytes() == 0, "Precondition");
2901
2902 if (start == limit) {
2903 // NTAMS of this region has not been set so nothing to do.
2904 return false;
2905 }
2906
2907 // 'start' should be in the heap.
2908 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2909 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2910 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2911
2912 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2913 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2914 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2915
2916 // If ntams is not card aligned then we bump card bitmap index
2917 // for limit so that we get the all the cards spanned by
3139 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3140 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3141 }
3142
3143 // We take a break if someone is trying to stop the world.
3144 bool ConcurrentMark::do_yield_check(uint worker_id) {
3145 if (SuspendibleThreadSet::should_yield()) {
3146 if (worker_id == 0) {
3147 _g1h->g1_policy()->record_concurrent_pause();
3148 }
3149 SuspendibleThreadSet::yield();
3150 return true;
3151 } else {
3152 return false;
3153 }
3154 }
3155
#ifndef PRODUCT
// for debugging purposes
// Dumps the heap bounds, the global finger and every task's local
// finger to the GC log.
void ConcurrentMark::print_finger() {
  gclog_or_tty->print_cr("heap [" PTR_FORMAT ", " PTR_FORMAT "), global finger = " PTR_FORMAT,
                         p2i(_heap_start), p2i(_heap_end), p2i(_finger));
  for (uint i = 0; i < _max_worker_id; ++i) {
    gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
  }
  gclog_or_tty->cr();
}
#endif
3167
3168 template<bool scan>
3169 inline void CMTask::process_grey_object(oop obj) {
3170 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3171 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3172
3173 if (_cm->verbose_high()) {
3174 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3175 _worker_id, p2i((void*) obj));
3176 }
3177
3178 size_t obj_size = obj->size();
3179 _words_scanned += obj_size;
3224
3225 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3226 ConcurrentMark* cm,
3227 CMTask* task)
3228 : _g1h(g1h), _cm(cm), _task(task) {
3229 assert(_ref_processor == NULL, "should be initialized to NULL");
3230
3231 if (G1UseConcMarkReferenceProcessing) {
3232 _ref_processor = g1h->ref_processor_cm();
3233 assert(_ref_processor != NULL, "should not be NULL");
3234 }
3235 }
3236
// Begin scanning the given region: record it as the current region,
// point the local finger at its bottom and compute the scan limit.
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->is_continues_humongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}
3252
3253 void CMTask::update_region_limit() {
3254 HeapRegion* hr = _curr_region;
3255 HeapWord* bottom = hr->bottom();
3256 HeapWord* limit = hr->next_top_at_mark_start();
3257
3258 if (limit == bottom) {
3259 if (_cm->verbose_low()) {
3260 gclog_or_tty->print_cr("[%u] found an empty region "
3261 "[" PTR_FORMAT ", " PTR_FORMAT ")",
3262 _worker_id, p2i(bottom), p2i(limit));
3263 }
3264 // The region was collected underneath our feet.
3265 // We set the finger to bottom to ensure that the bitmap
3266 // iteration that will follow this will not do anything.
3267 // (this is not a condition that holds when we set the region up,
3268 // as the region is not supposed to be empty in the first place)
3269 _finger = bottom;
3270 } else if (limit >= _region_limit) {
3271 assert(limit >= _finger, "peace of mind");
3272 } else {
3273 assert(limit < _region_limit, "only way to get here");
3274 // This can happen under some pretty unusual circumstances. An
3275 // evacuation pause empties the region underneath our feet (NTAMS
3276 // at bottom). We then do some allocation in the region (NTAMS
3277 // stays at bottom), followed by the region being used as a GC
3278 // alloc region (NTAMS will move to top() and the objects
3279 // originally below it will be grayed). All objects now marked in
3280 // the region are explicitly grayed, if below the global finger,
3281 // and we do not need in fact to scan anything else. So, we simply
3282 // set _finger to be limit to ensure that the bitmap iteration
3283 // doesn't do anything.
3284 _finger = limit;
3285 }
3286
3287 _region_limit = limit;
3288 }
3289
// Abandon the region currently being scanned (e.g. when aborting)
// by clearing the current-region fields.
void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region " PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}
3298
3299 void CMTask::clear_region_fields() {
3300 // Values for these three fields that indicate that we're not
3301 // holding on to a region.
3302 _curr_region = NULL;
3303 _finger = NULL;
3304 _region_limit = NULL;
3305 }
3306
3307 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3308 if (cm_oop_closure == NULL) {
3309 assert(_cm_oop_closure != NULL, "invariant");
3310 } else {
3311 assert(_cm_oop_closure == NULL, "invariant");
3312 }
3313 _cm_oop_closure = cm_oop_closure;
3395 return;
3396 }
3397
3398 double curr_time_ms = os::elapsedVTime() * 1000.0;
3399
3400 // (3) If marking stats are enabled, then we update the step history.
3401 #if _MARKING_STATS_
3402 if (_words_scanned >= _words_scanned_limit) {
3403 ++_clock_due_to_scanning;
3404 }
3405 if (_refs_reached >= _refs_reached_limit) {
3406 ++_clock_due_to_marking;
3407 }
3408
3409 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3410 _interval_start_time_ms = curr_time_ms;
3411 _all_clock_intervals_ms.add(last_interval_ms);
3412
3413 if (_cm->verbose_medium()) {
3414 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3415 "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
3416 _worker_id, last_interval_ms,
3417 _words_scanned,
3418 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3419 _refs_reached,
3420 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3421 }
3422 #endif // _MARKING_STATS_
3423
3424 // (4) We check whether we should yield. If we have to, then we abort.
3425 if (SuspendibleThreadSet::should_yield()) {
3426 // We should yield. To do this we abort the task. The caller is
3427 // responsible for yielding.
3428 set_has_aborted();
3429 statsOnly( ++_aborted_yield );
3430 return;
3431 }
3432
3433 // (5) We check whether we've reached our time quota. If we have,
3434 // then we abort.
3435 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3564 // of things to do) or totally (at the very end).
3565 size_t target_size;
3566 if (partially) {
3567 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3568 } else {
3569 target_size = 0;
3570 }
3571
3572 if (_task_queue->size() > target_size) {
3573 if (_cm->verbose_high()) {
3574 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3575 _worker_id, target_size);
3576 }
3577
3578 oop obj;
3579 bool ret = _task_queue->pop_local(obj);
3580 while (ret) {
3581 statsOnly( ++_local_pops );
3582
3583 if (_cm->verbose_high()) {
3584 gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
3585 p2i((void*) obj));
3586 }
3587
3588 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3589 assert(!_g1h->is_on_master_free_list(
3590 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3591
3592 scan_object(obj);
3593
3594 if (_task_queue->size() <= target_size || has_aborted()) {
3595 ret = false;
3596 } else {
3597 ret = _task_queue->pop_local(obj);
3598 }
3599 }
3600
3601 if (_cm->verbose_high()) {
3602 gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3603 _worker_id, _task_queue->size());
3604 }
3921 // This means that we're already holding on to a region.
3922 assert(_finger != NULL, "if region is not NULL, then the finger "
3923 "should not be NULL either");
3924
3925 // We might have restarted this task after an evacuation pause
3926 // which might have evacuated the region we're holding on to
3927 // underneath our feet. Let's read its limit again to make sure
3928 // that we do not iterate over a region of the heap that
3929 // contains garbage (update_region_limit() will also move
3930 // _finger to the start of the region if it is found empty).
3931 update_region_limit();
3932 // We will start from _finger not from the start of the region,
3933 // as we might be restarting this task after aborting half-way
3934 // through scanning this region. In this case, _finger points to
3935 // the address where we last found a marked object. If this is a
3936 // fresh region, _finger points to start().
3937 MemRegion mr = MemRegion(_finger, _region_limit);
3938
3939 if (_cm->verbose_low()) {
3940 gclog_or_tty->print_cr("[%u] we're scanning part "
3941 "[" PTR_FORMAT ", " PTR_FORMAT ") "
3942 "of region " HR_FORMAT,
3943 _worker_id, p2i(_finger), p2i(_region_limit),
3944 HR_FORMAT_PARAMS(_curr_region));
3945 }
3946
3947 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
3948 "humongous regions should go around loop once only");
3949
3950 // Some special cases:
3951 // If the memory region is empty, we can just give up the region.
3952 // If the current region is humongous then we only need to check
3953 // the bitmap for the bit associated with the start of the object,
3954 // scan the object if it's live, and give up the region.
3955 // Otherwise, let's iterate over the bitmap of the part of the region
3956 // that is left.
3957 // If the iteration is successful, give up the region.
3958 if (mr.is_empty()) {
3959 giveup_current_region();
3960 regular_clock_call();
3961 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
3962 if (_nextMarkBitMap->isMarked(mr.start())) {
4009 // return NULL with potentially more regions available for
4010 // claiming and why we have to check out_of_regions() to determine
4011 // whether we're done or not.
4012 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4013 // We are going to try to claim a new region. We should have
4014 // given up on the previous one.
4015 // Separated the asserts so that we know which one fires.
4016 assert(_curr_region == NULL, "invariant");
4017 assert(_finger == NULL, "invariant");
4018 assert(_region_limit == NULL, "invariant");
4019 if (_cm->verbose_low()) {
4020 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4021 }
4022 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4023 if (claimed_region != NULL) {
4024 // Yes, we managed to claim one
4025 statsOnly( ++_regions_claimed );
4026
4027 if (_cm->verbose_low()) {
4028 gclog_or_tty->print_cr("[%u] we successfully claimed "
4029 "region " PTR_FORMAT,
4030 _worker_id, p2i(claimed_region));
4031 }
4032
4033 setup_for_region(claimed_region);
4034 assert(_curr_region == claimed_region, "invariant");
4035 }
4036 // It is important to call the regular clock here. It might take
4037 // a while to claim a region if, for example, we hit a large
4038 // block of empty regions. So we need to call the regular clock
4039 // method once round the loop to make sure it's called
4040 // frequently enough.
4041 regular_clock_call();
4042 }
4043
4044 if (!has_aborted() && _curr_region == NULL) {
4045 assert(_cm->out_of_regions(),
4046 "at this point we should be out of regions");
4047 }
4048 } while ( _curr_region != NULL && !has_aborted());
4049
4070 // Attempt at work stealing from other task's queues.
4071 if (do_stealing && !has_aborted()) {
4072 // We have not aborted. This means that we have finished all that
4073 // we could. Let's try to do some stealing...
4074
4075 // We cannot check whether the global stack is empty, since other
4076 // tasks might be pushing objects to it concurrently.
4077 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4078 "only way to reach here");
4079
4080 if (_cm->verbose_low()) {
4081 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4082 }
4083
4084 while (!has_aborted()) {
4085 oop obj;
4086 statsOnly( ++_steal_attempts );
4087
4088 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4089 if (_cm->verbose_medium()) {
4090 gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
4091 _worker_id, p2i((void*) obj));
4092 }
4093
4094 statsOnly( ++_steals );
4095
4096 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4097 "any stolen object should be marked");
4098 scan_object(obj);
4099
4100 // And since we're towards the end, let's totally drain the
4101 // local queue and global stack.
4102 drain_local_queue(false);
4103 drain_global_stack(false);
4104 } else {
4105 break;
4106 }
4107 }
4108 }
4109
4110 // If we are about to wrap up and go into termination, check if we
4278 guarantee(task_queue != NULL, "invariant");
4279 guarantee(task_queues != NULL, "invariant");
4280
4281 statsOnly( _clock_due_to_scanning = 0;
4282 _clock_due_to_marking = 0 );
4283
4284 _marking_step_diffs_ms.add(0.5);
4285 }
4286
4287 // These are formatting macros that are used below to ensure
4288 // consistent formatting. The *_H_* versions are used to format the
4289 // header for a particular value and they should be kept consistent
4290 // with the corresponding macro. Also note that most of the macros add
4291 // the necessary white space (as a prefix) which makes them a bit
4292 // easier to compose.
4293
4294 // All the output lines are prefixed with this string to be able to
4295 // identify them easily in a large log file.
4296 #define G1PPRL_LINE_PREFIX "###"
4297
4298 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
4299 #ifdef _LP64
4300 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4301 #else // _LP64
4302 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4303 #endif // _LP64
4304
4305 // For per-region info
4306 #define G1PPRL_TYPE_FORMAT " %-4s"
4307 #define G1PPRL_TYPE_H_FORMAT " %4s"
4308 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
4309 #define G1PPRL_BYTE_H_FORMAT " %9s"
4310 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4311 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4312
4313 // For summary info
4314 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
4315 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
4316 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
4317 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
4318
4319 G1PrintRegionLivenessInfoClosure::
4320 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4321 : _out(out),
4322 _total_used_bytes(0), _total_capacity_bytes(0),
4323 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4324 _hum_used_bytes(0), _hum_capacity_bytes(0),
4325 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4326 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4327 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4328 MemRegion g1_reserved = g1h->g1_reserved();
4329 double now = os::elapsedTime();
4330
4331 // Print the header of the output.
4332 _out->cr();
4333 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4334 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4335 G1PPRL_SUM_ADDR_FORMAT("reserved")
4336 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4337 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
|