< prev index next >

src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page




 267   // Double capacity if possible
 268   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 269   // Do not give up existing stack until we have managed to
 270   // get the double capacity that we desired.
 271   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 272                                                            sizeof(oop)));
 273   if (rs.is_reserved()) {
 274     // Release the backing store associated with old stack
 275     _virtual_space.release();
 276     // Reinitialize virtual space for new stack
 277     if (!_virtual_space.initialize(rs, rs.size())) {
 278       fatal("Not enough swap for expanded marking stack capacity");
 279     }
 280     _base = (oop*)(_virtual_space.low());
 281     _index = 0;
 282     _capacity = new_capacity;
 283   } else {
 284     if (PrintGCDetails && Verbose) {
 285       // Failed to double capacity, continue;
 286       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 287                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
 288                           _capacity / K, new_capacity / K);
 289     }
 290   }
 291 }
 292 
// Record whether the marking stack should be expanded before the
// next marking cycle. Called while resetting the marking state.
void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}
 299 
 300 CMMarkStack::~CMMarkStack() {
 301   if (_base != NULL) {
 302     _base = NULL;
 303     _virtual_space.release();
 304   }
 305 }
 306 
 307 void CMMarkStack::par_push(oop ptr) {


 564   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 565   _cleanup_times(),
 566   _total_counting_time(0.0),
 567   _total_rs_scrub_time(0.0),
 568 
 569   _parallel_workers(NULL),
 570 
 571   _count_card_bitmaps(NULL),
 572   _count_marked_bytes(NULL),
 573   _completed_initialization(false) {
 574   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 575   if (verbose_level < no_verbose) {
 576     verbose_level = no_verbose;
 577   }
 578   if (verbose_level > high_verbose) {
 579     verbose_level = high_verbose;
 580   }
 581   _verbose_level = verbose_level;
 582 
 583   if (verbose_low()) {
 584     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 585                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 586   }
 587 
 588   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 589   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 590 
 591   // Create & start a ConcurrentMark thread.
 592   _cmThread = new ConcurrentMarkThread(this);
 593   assert(cmThread() != NULL, "CM Thread should have been created");
 594   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 595   if (_cmThread->osthread() == NULL) {
 596       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 597   }
 598 
 599   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 600   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 601   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 602 
 603   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 604   satb_qs.set_buffer_size(G1SATBBufferSize);


 820   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 821   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 822 }
 823 
 824 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 825   set_concurrency(active_tasks);
 826 
 827   _concurrent = concurrent;
 828   // We propagate this to all tasks, not just the active ones.
 829   for (uint i = 0; i < _max_worker_id; ++i)
 830     _tasks[i]->set_concurrent(concurrent);
 831 
 832   if (concurrent) {
 833     set_concurrent_marking_in_progress();
 834   } else {
 835     // We currently assume that the concurrent flag has been set to
 836     // false before we start remark. At this point we should also be
 837     // in a STW phase.
 838     assert(!concurrent_marking_in_progress(), "invariant");
 839     assert(out_of_regions(),
 840            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
 841                    p2i(_finger), p2i(_heap_end)));
 842   }
 843 }
 844 
// Return the global marking state to its default (non-marking)
// values: no active tasks and the in-progress flag cleared.
void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}
 852 
// ConcurrentMark is created once at VM startup and lives for the
// lifetime of the VM; reaching this destructor is a programming error.
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
 857 
 858 void ConcurrentMark::clearNextBitmap() {
 859   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 860 


1435     _bm(bm), _region_marked_bytes(0) { }
1436 
1437   bool doHeapRegion(HeapRegion* hr) {
1438 
1439     if (hr->continuesHumongous()) {
1440       // We will ignore these here and process them when their
1441       // associated "starts humongous" region is processed (see
1442       // set_bit_for_heap_region()). Note that we cannot rely on their
1443       // associated "starts humongous" region to have their bit set to
1444       // 1 since, due to the region chunking in the parallel region
1445       // iteration, a "continues humongous" region might be visited
1446       // before its associated "starts humongous".
1447       return false;
1448     }
1449 
1450     HeapWord* ntams = hr->next_top_at_mark_start();
1451     HeapWord* start = hr->bottom();
1452 
1453     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1454            err_msg("Preconditions not met - "
1455                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1456                    p2i(start), p2i(ntams), p2i(hr->end())));
1457 
1458     // Find the first marked object at or after "start".
1459     start = _bm->getNextMarkedWordAddress(start, ntams);
1460 
1461     size_t marked_bytes = 0;
1462 
1463     while (start < ntams) {
1464       oop obj = oop(start);
1465       int obj_sz = obj->size();
1466       HeapWord* obj_end = start + obj_sz;
1467 
1468       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1469       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1470 
1471       // Note: if we're looking at the last region in heap - obj_end
1472       // could be actually just beyond the end of the heap; end_idx
1473       // will then correspond to a (non-existent) card that is also
1474       // just beyond the heap.
1475       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {


1752     // Mark the allocated-since-marking portion...
1753     if (ntams < top) {
1754       // This definitely means the region has live objects.
1755       set_bit_for_region(hr);
1756 
1757       // Now set the bits in the card bitmap for [ntams, top)
1758       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1759       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1760 
1761       // Note: if we're looking at the last region in heap - top
1762       // could be actually just beyond the end of the heap; end_idx
1763       // will then correspond to a (non-existent) card that is also
1764       // just beyond the heap.
1765       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1766         // end of object is not card aligned - increment to cover
1767         // all the cards spanned by the object
1768         end_idx += 1;
1769       }
1770 
1771       assert(end_idx <= _card_bm->size(),
1772              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1773                      end_idx, _card_bm->size()));
1774       assert(start_idx < _card_bm->size(),
1775              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1776                      start_idx, _card_bm->size()));
1777 
1778       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1779     }
1780 
1781     // Set the bit for the region if it contains live data
1782     if (hr->next_marked_bytes() > 0) {
1783       set_bit_for_region(hr);
1784     }
1785 
1786     return false;
1787   }
1788 };
1789 
1790 class G1ParFinalCountTask: public AbstractGangTask {
1791 protected:
1792   G1CollectedHeap* _g1h;
1793   ConcurrentMark* _cm;
1794   BitMap* _actual_region_bm;
1795   BitMap* _actual_card_bm;


2260   int             _ref_counter_limit;
2261   int             _ref_counter;
2262   bool            _is_serial;
2263  public:
  // 'cm' and 'task' identify the marking context this closure feeds;
  // 'is_serial' selects the single-threaded reference-processing path,
  // which only worker 0 is allowed to run.
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    // Count down from the limit; do_oop_work() drains the stacks
    // each time this counter reaches zero.
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
2274 
2275   template <class T> void do_oop_work(T* p) {
2276     if (!_cm->has_overflown()) {
2277       oop obj = oopDesc::load_decode_heap_oop(p);
2278       if (_cm->verbose_high()) {
2279         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2280                                "*"PTR_FORMAT" = "PTR_FORMAT,
2281                                _task->worker_id(), p2i(p), p2i((void*) obj));
2282       }
2283 
2284       _task->deal_with_reference(obj);
2285       _ref_counter--;
2286 
2287       if (_ref_counter == 0) {
2288         // We have dealt with _ref_counter_limit references, pushing them
2289         // and objects reachable from them on to the local stack (and
2290         // possibly the global stack). Call CMTask::do_marking_step() to
2291         // process these entries.
2292         //
2293         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2294         // there's nothing more to do (i.e. we're done with the entries that
2295         // were pushed as a result of the CMTask::deal_with_reference() calls
2296         // above) or we overflow.
2297         //
2298         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2299         // flag while there may still be some work to do. (See the comment at
2300         // the beginning of CMTask::do_marking_step() for those conditions -


3001     //      it will skip the subsequent CH regions).
3002     // If it comes across a region that suddenly becomes CH, the
3003     // scenario will be similar to b). So, the race between
3004     // claim_region() and a humongous object allocation might force us
3005     // to do a bit of unnecessary work (due to some unnecessary bitmap
3006     // iterations) but it should not introduce and correctness issues.
3007     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
3008 
3009     // Above heap_region_containing_raw may return NULL as we always scan claim
3010     // until the end of the heap. In this case, just jump to the next region.
3011     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
3012 
3013     // Is the gap between reading the finger and doing the CAS too long?
3014     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
3015     if (res == finger && curr_region != NULL) {
3016       // we succeeded
3017       HeapWord*   bottom        = curr_region->bottom();
3018       HeapWord*   limit         = curr_region->next_top_at_mark_start();
3019 
3020       if (verbose_low()) {
3021         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
3022                                "["PTR_FORMAT", "PTR_FORMAT"), "
3023                                "limit = "PTR_FORMAT,
3024                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
3025       }
3026 
3027       // notice that _finger == end cannot be guaranteed here since,
3028       // someone else might have moved the finger even further
3029       assert(_finger >= end, "the finger should have moved forward");
3030 
3031       if (verbose_low()) {
3032         gclog_or_tty->print_cr("[%u] we were successful with region = "
3033                                PTR_FORMAT, worker_id, p2i(curr_region));
3034       }
3035 
3036       if (limit > bottom) {
3037         if (verbose_low()) {
3038           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3039                                  "returning it ", worker_id, p2i(curr_region));
3040         }
3041         return curr_region;
3042       } else {
3043         assert(limit == bottom,
3044                "the region limit should be at bottom");
3045         if (verbose_low()) {
3046           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3047                                  "returning NULL", worker_id, p2i(curr_region));
3048         }
3049         // we return NULL and the caller should try calling
3050         // claim_region() again.
3051         return NULL;
3052       }
3053     } else {
3054       assert(_finger > finger, "the finger should have moved forward");
3055       if (verbose_low()) {
3056         if (curr_region == NULL) {
3057           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
3058                                  "global finger = "PTR_FORMAT", "
3059                                  "our finger = "PTR_FORMAT,
3060                                  worker_id, p2i(_finger), p2i(finger));
3061         } else {
3062           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3063                                  "global finger = "PTR_FORMAT", "
3064                                  "our finger = "PTR_FORMAT,
3065                                  worker_id, p2i(_finger), p2i(finger));
3066         }
3067       }
3068 
3069       // read it again
3070       finger = _finger;
3071     }
3072   }
3073 
3074   return NULL;
3075 }
3076 
3077 #ifndef PRODUCT
// Identifies which data structure is being scanned while verifying
// that no collection-set oops are present: the global marking stack
// or the per-worker task queues.
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,
  VerifyNoCSetOopsQueues
};
3082 
3083 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
3084 private:
3085   G1CollectedHeap* _g1h;
3086   VerifyNoCSetOopsPhase _phase;
3087   int _info;
3088 
3089   const char* phase_str() {
3090     switch (_phase) {
3091     case VerifyNoCSetOopsStack:         return "Stack";
3092     case VerifyNoCSetOopsQueues:        return "Queue";
3093     default:                            ShouldNotReachHere();
3094     }
3095     return NULL;
3096   }
3097 
  // Fail with a descriptive message if 'obj' lives in the collection
  // set; such oops must never be found on the marking data structures.
  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }
3103 
3104 public:
3105   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3106 
  // Record which structure is about to be scanned; 'info' is an
  // optional index (e.g. a worker id) included in failure messages,
  // or -1 when not applicable.
  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
    _phase = phase;
    _info = info;
  }
3111 
  // Decode the oop at 'p' and verify it is outside the collection set.
  virtual void do_oop(oop* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_object_work(obj);
  }
3116 
3117   virtual void do_oop(narrowOop* p) {
3118     // We should not come across narrow oops while scanning marking
3119     // stacks
3120     ShouldNotReachHere();


3141   for (uint i = 0; i < _max_worker_id; i += 1) {
3142     cl.set_phase(VerifyNoCSetOopsQueues, i);
3143     CMTaskQueue* queue = _task_queues->queue(i);
3144     queue->oops_do(&cl);
3145   }
3146 
3147   // Verify the global finger
3148   HeapWord* global_finger = finger();
3149   if (global_finger != NULL && global_finger < _heap_end) {
3150     // The global finger always points to a heap region boundary. We
3151     // use heap_region_containing_raw() to get the containing region
3152     // given that the global finger could be pointing to a free region
3153     // which subsequently becomes continues humongous. If that
3154     // happens, heap_region_containing() will return the bottom of the
3155     // corresponding starts humongous region and the check below will
3156     // not hold any more.
3157     // Since we always iterate over all regions, we might get a NULL HeapRegion
3158     // here.
3159     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3160     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3161               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3162                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3163   }
3164 
3165   // Verify the task fingers
3166   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3167   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3168     CMTask* task = _tasks[i];
3169     HeapWord* task_finger = task->finger();
3170     if (task_finger != NULL && task_finger < _heap_end) {
3171       // See above note on the global finger verification.
3172       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3173       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3174                 !task_hr->in_collection_set(),
3175                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3176                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3177     }
3178   }
3179 }
3180 #endif // PRODUCT
3181 
3182 // Aggregate the counting data that was constructed concurrently
3183 // with marking.
3184 class AggregateCountDataHRClosure: public HeapRegionClosure {
3185   G1CollectedHeap* _g1h;
3186   ConcurrentMark* _cm;
3187   CardTableModRefBS* _ct_bs;
3188   BitMap* _cm_card_bm;
3189   uint _max_worker_id;
3190 
3191  public:
3192   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3193                               BitMap* cm_card_bm,
3194                               uint max_worker_id) :
3195     _g1h(g1h), _cm(g1h->concurrent_mark()),


3197     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3198 
3199   bool doHeapRegion(HeapRegion* hr) {
3200     if (hr->continuesHumongous()) {
3201       // We will ignore these here and process them when their
3202       // associated "starts humongous" region is processed.
3203       // Note that we cannot rely on their associated
3204       // "starts humongous" region to have their bit set to 1
3205       // since, due to the region chunking in the parallel region
3206       // iteration, a "continues humongous" region might be visited
3207       // before its associated "starts humongous".
3208       return false;
3209     }
3210 
3211     HeapWord* start = hr->bottom();
3212     HeapWord* limit = hr->next_top_at_mark_start();
3213     HeapWord* end = hr->end();
3214 
3215     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3216            err_msg("Preconditions not met - "
3217                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3218                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3219                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3220 
3221     assert(hr->next_marked_bytes() == 0, "Precondition");
3222 
3223     if (start == limit) {
3224       // NTAMS of this region has not been set so nothing to do.
3225       return false;
3226     }
3227 
3228     // 'start' should be in the heap.
3229     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3230     // 'end' *may* be just beyone the end of the heap (if hr is the last region)
3231     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3232 
3233     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3234     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3235     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3236 
3237     // If ntams is not card aligned then we bump card bitmap index
3238     // for limit so that we get the all the cards spanned by


3477   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3478   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3479 }
3480 
3481 // We take a break if someone is trying to stop the world.
3482 bool ConcurrentMark::do_yield_check(uint worker_id) {
3483   if (SuspendibleThreadSet::should_yield()) {
3484     if (worker_id == 0) {
3485       _g1h->g1_policy()->record_concurrent_pause();
3486     }
3487     SuspendibleThreadSet::yield();
3488     return true;
3489   } else {
3490     return false;
3491   }
3492 }
3493 
3494 #ifndef PRODUCT
3495 // for debugging purposes
3496 void ConcurrentMark::print_finger() {
3497   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3498                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3499   for (uint i = 0; i < _max_worker_id; ++i) {
3500     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3501   }
3502   gclog_or_tty->cr();
3503 }
3504 #endif
3505 
3506 template<bool scan>
3507 inline void CMTask::process_grey_object(oop obj) {
3508   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3509   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3510 
3511   if (_cm->verbose_high()) {
3512     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3513                            _worker_id, p2i((void*) obj));
3514   }
3515 
3516   size_t obj_size = obj->size();
3517   _words_scanned += obj_size;


3562 
// Closure handed to a CMTask for processing oops found during
// marking. When concurrent reference processing is enabled, wire up
// the CM reference processor so references are discovered as we scan.
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}
3574 
// Make 'hr' this task's current region: set the local finger to the
// region's bottom and compute the limit up to which we will scan.
// 'hr' must be a real, non-continues-humongous region (claim_region()
// guarantees this).
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
        "claim_region() should have filtered out NULL regions");
  assert(!hr->continuesHumongous(),
        "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region  = hr;
  _finger       = hr->bottom();
  update_region_limit();
}
3590 
// Re-read the current region's NTAMS and update _region_limit (and
// possibly _finger). Called when a region is first set up and again
// when a task restarts after an evacuation pause, since the pause may
// have emptied or otherwise changed the region underneath us.
void CMTask::update_region_limit() {
  HeapRegion* hr            = _curr_region;
  HeapWord* bottom          = hr->bottom();
  HeapWord* limit           = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "["PTR_FORMAT", "PTR_FORMAT")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    // NTAMS stayed put or moved forward; the current finger is still
    // valid, so there is nothing to adjust.
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances.  An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and we do not need in fact to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}
3627 
// Abandon the current region (which must be set): clear the region,
// finger and limit fields so the task will claim a new region next.
void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}
3636 
// Reset the per-region scanning state. A NULL _curr_region/_finger/
// _region_limit triple means the task is not holding on to a region.
void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region   = NULL;
  _finger        = NULL;
  _region_limit  = NULL;
}
3644 
3645 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3646   if (cm_oop_closure == NULL) {
3647     assert(_cm_oop_closure != NULL, "invariant");
3648   } else {
3649     assert(_cm_oop_closure == NULL, "invariant");
3650   }
3651   _cm_oop_closure = cm_oop_closure;


3733     return;
3734   }
3735 
3736   double curr_time_ms = os::elapsedVTime() * 1000.0;
3737 
3738   // (3) If marking stats are enabled, then we update the step history.
3739 #if _MARKING_STATS_
3740   if (_words_scanned >= _words_scanned_limit) {
3741     ++_clock_due_to_scanning;
3742   }
3743   if (_refs_reached >= _refs_reached_limit) {
3744     ++_clock_due_to_marking;
3745   }
3746 
3747   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3748   _interval_start_time_ms = curr_time_ms;
3749   _all_clock_intervals_ms.add(last_interval_ms);
3750 
3751   if (_cm->verbose_medium()) {
3752       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3753                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3754                         _worker_id, last_interval_ms,
3755                         _words_scanned,
3756                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3757                         _refs_reached,
3758                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3759   }
3760 #endif // _MARKING_STATS_
3761 
3762   // (4) We check whether we should yield. If we have to, then we abort.
3763   if (SuspendibleThreadSet::should_yield()) {
3764     // We should yield. To do this we abort the task. The caller is
3765     // responsible for yielding.
3766     set_has_aborted();
3767     statsOnly( ++_aborted_yield );
3768     return;
3769   }
3770 
3771   // (5) We check whether we've reached our time quota. If we have,
3772   // then we abort.
3773   double elapsed_time_ms = curr_time_ms - _start_time_ms;


3902   // of things to do) or totally (at the very end).
3903   size_t target_size;
3904   if (partially) {
3905     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3906   } else {
3907     target_size = 0;
3908   }
3909 
3910   if (_task_queue->size() > target_size) {
3911     if (_cm->verbose_high()) {
3912       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3913                              _worker_id, target_size);
3914     }
3915 
3916     oop obj;
3917     bool ret = _task_queue->pop_local(obj);
3918     while (ret) {
3919       statsOnly( ++_local_pops );
3920 
3921       if (_cm->verbose_high()) {
3922         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3923                                p2i((void*) obj));
3924       }
3925 
3926       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3927       assert(!_g1h->is_on_master_free_list(
3928                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3929 
3930       scan_object(obj);
3931 
3932       if (_task_queue->size() <= target_size || has_aborted()) {
3933         ret = false;
3934       } else {
3935         ret = _task_queue->pop_local(obj);
3936       }
3937     }
3938 
3939     if (_cm->verbose_high()) {
3940       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3941                              _worker_id, _task_queue->size());
3942     }


4255       // This means that we're already holding on to a region.
4256       assert(_finger != NULL, "if region is not NULL, then the finger "
4257              "should not be NULL either");
4258 
4259       // We might have restarted this task after an evacuation pause
4260       // which might have evacuated the region we're holding on to
4261       // underneath our feet. Let's read its limit again to make sure
4262       // that we do not iterate over a region of the heap that
4263       // contains garbage (update_region_limit() will also move
4264       // _finger to the start of the region if it is found empty).
4265       update_region_limit();
4266       // We will start from _finger not from the start of the region,
4267       // as we might be restarting this task after aborting half-way
4268       // through scanning this region. In this case, _finger points to
4269       // the address where we last found a marked object. If this is a
4270       // fresh region, _finger points to start().
4271       MemRegion mr = MemRegion(_finger, _region_limit);
4272 
4273       if (_cm->verbose_low()) {
4274         gclog_or_tty->print_cr("[%u] we're scanning part "
4275                                "["PTR_FORMAT", "PTR_FORMAT") "
4276                                "of region "HR_FORMAT,
4277                                _worker_id, p2i(_finger), p2i(_region_limit),
4278                                HR_FORMAT_PARAMS(_curr_region));
4279       }
4280 
4281       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4282              "humongous regions should go around loop once only");
4283 
4284       // Some special cases:
4285       // If the memory region is empty, we can just give up the region.
4286       // If the current region is humongous then we only need to check
4287       // the bitmap for the bit associated with the start of the object,
4288       // scan the object if it's live, and give up the region.
4289       // Otherwise, let's iterate over the bitmap of the part of the region
4290       // that is left.
4291       // If the iteration is successful, give up the region.
4292       if (mr.is_empty()) {
4293         giveup_current_region();
4294         regular_clock_call();
4295       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4296         if (_nextMarkBitMap->isMarked(mr.start())) {


4343     // return NULL with potentially more regions available for
4344     // claiming and why we have to check out_of_regions() to determine
4345     // whether we're done or not.
4346     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4347       // We are going to try to claim a new region. We should have
4348       // given up on the previous one.
4349       // Separated the asserts so that we know which one fires.
4350       assert(_curr_region  == NULL, "invariant");
4351       assert(_finger       == NULL, "invariant");
4352       assert(_region_limit == NULL, "invariant");
4353       if (_cm->verbose_low()) {
4354         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4355       }
4356       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4357       if (claimed_region != NULL) {
4358         // Yes, we managed to claim one
4359         statsOnly( ++_regions_claimed );
4360 
4361         if (_cm->verbose_low()) {
4362           gclog_or_tty->print_cr("[%u] we successfully claimed "
4363                                  "region "PTR_FORMAT,
4364                                  _worker_id, p2i(claimed_region));
4365         }
4366 
4367         setup_for_region(claimed_region);
4368         assert(_curr_region == claimed_region, "invariant");
4369       }
4370       // It is important to call the regular clock here. It might take
4371       // a while to claim a region if, for example, we hit a large
4372       // block of empty regions. So we need to call the regular clock
4373       // method once round the loop to make sure it's called
4374       // frequently enough.
4375       regular_clock_call();
4376     }
4377 
4378     if (!has_aborted() && _curr_region == NULL) {
4379       assert(_cm->out_of_regions(),
4380              "at this point we should be out of regions");
4381     }
4382   } while ( _curr_region != NULL && !has_aborted());
4383 


4404   // Attempt at work stealing from other task's queues.
4405   if (do_stealing && !has_aborted()) {
4406     // We have not aborted. This means that we have finished all that
4407     // we could. Let's try to do some stealing...
4408 
4409     // We cannot check whether the global stack is empty, since other
4410     // tasks might be pushing objects to it concurrently.
4411     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4412            "only way to reach here");
4413 
4414     if (_cm->verbose_low()) {
4415       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4416     }
4417 
4418     while (!has_aborted()) {
4419       oop obj;
4420       statsOnly( ++_steal_attempts );
4421 
4422       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4423         if (_cm->verbose_medium()) {
4424           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4425                                  _worker_id, p2i((void*) obj));
4426         }
4427 
4428         statsOnly( ++_steals );
4429 
4430         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4431                "any stolen object should be marked");
4432         scan_object(obj);
4433 
4434         // And since we're towards the end, let's totally drain the
4435         // local queue and global stack.
4436         drain_local_queue(false);
4437         drain_global_stack(false);
4438       } else {
4439         break;
4440       }
4441     }
4442   }
4443 
4444   // If we are about to wrap up and go into termination, check if we


4612   guarantee(task_queue != NULL, "invariant");
4613   guarantee(task_queues != NULL, "invariant");
4614 
4615   statsOnly( _clock_due_to_scanning = 0;
4616              _clock_due_to_marking  = 0 );
4617 
4618   _marking_step_diffs_ms.add(0.5);
4619 }
4620 
// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.
//
// NOTE: the white space between a string literal and an adjacent
// format macro is required: in C++11 a token glued directly onto a
// string literal is parsed as a user-defined literal suffix.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
4652 
4653 G1PrintRegionLivenessInfoClosure::
4654 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4655   : _out(out),
4656     _total_used_bytes(0), _total_capacity_bytes(0),
4657     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4658     _hum_used_bytes(0), _hum_capacity_bytes(0),
4659     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4660     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4661   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4662   MemRegion g1_reserved = g1h->g1_reserved();
4663   double now = os::elapsedTime();
4664 
4665   // Print the header of the output.
4666   _out->cr();
4667   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4668   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4669                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4670                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4671                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),




 267   // Double capacity if possible
 268   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 269   // Do not give up existing stack until we have managed to
 270   // get the double capacity that we desired.
 271   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 272                                                            sizeof(oop)));
 273   if (rs.is_reserved()) {
 274     // Release the backing store associated with old stack
 275     _virtual_space.release();
 276     // Reinitialize virtual space for new stack
 277     if (!_virtual_space.initialize(rs, rs.size())) {
 278       fatal("Not enough swap for expanded marking stack capacity");
 279     }
 280     _base = (oop*)(_virtual_space.low());
 281     _index = 0;
 282     _capacity = new_capacity;
 283   } else {
 284     if (PrintGCDetails && Verbose) {
 285       // Failed to double capacity, continue;
 286       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 287                           SIZE_FORMAT "K to " SIZE_FORMAT "K",
 288                           _capacity / K, new_capacity / K);
 289     }
 290   }
 291 }
 292 
// Records whether the mark stack should be grown before its next use.
void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}
 299 
 300 CMMarkStack::~CMMarkStack() {
 301   if (_base != NULL) {
 302     _base = NULL;
 303     _virtual_space.release();
 304   }
 305 }
 306 
 307 void CMMarkStack::par_push(oop ptr) {


 564   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 565   _cleanup_times(),
 566   _total_counting_time(0.0),
 567   _total_rs_scrub_time(0.0),
 568 
 569   _parallel_workers(NULL),
 570 
 571   _count_card_bitmaps(NULL),
 572   _count_marked_bytes(NULL),
 573   _completed_initialization(false) {
 574   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 575   if (verbose_level < no_verbose) {
 576     verbose_level = no_verbose;
 577   }
 578   if (verbose_level > high_verbose) {
 579     verbose_level = high_verbose;
 580   }
 581   _verbose_level = verbose_level;
 582 
 583   if (verbose_low()) {
 584     gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT", "
 585                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 586   }
 587 
 588   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 589   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 590 
 591   // Create & start a ConcurrentMark thread.
 592   _cmThread = new ConcurrentMarkThread(this);
 593   assert(cmThread() != NULL, "CM Thread should have been created");
 594   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 595   if (_cmThread->osthread() == NULL) {
 596       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 597   }
 598 
 599   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 600   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 601   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 602 
 603   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 604   satb_qs.set_buffer_size(G1SATBBufferSize);


 820   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 821   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 822 }
 823 
 824 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 825   set_concurrency(active_tasks);
 826 
 827   _concurrent = concurrent;
 828   // We propagate this to all tasks, not just the active ones.
 829   for (uint i = 0; i < _max_worker_id; ++i)
 830     _tasks[i]->set_concurrent(concurrent);
 831 
 832   if (concurrent) {
 833     set_concurrent_marking_in_progress();
 834   } else {
 835     // We currently assume that the concurrent flag has been set to
 836     // false before we start remark. At this point we should also be
 837     // in a STW phase.
 838     assert(!concurrent_marking_in_progress(), "invariant");
 839     assert(out_of_regions(),
 840            err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 841                    p2i(_finger), p2i(_heap_end)));
 842   }
 843 }
 844 
void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking: reset the per-cycle marking state, deactivate
  // all tasks, and drop the "marking in progress" flag.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}
 852 
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed, so reaching this
  // destructor indicates a bug.
  ShouldNotReachHere();
}
 857 
 858 void ConcurrentMark::clearNextBitmap() {
 859   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 860 


1435     _bm(bm), _region_marked_bytes(0) { }
1436 
1437   bool doHeapRegion(HeapRegion* hr) {
1438 
1439     if (hr->continuesHumongous()) {
1440       // We will ignore these here and process them when their
1441       // associated "starts humongous" region is processed (see
1442       // set_bit_for_heap_region()). Note that we cannot rely on their
1443       // associated "starts humongous" region to have their bit set to
1444       // 1 since, due to the region chunking in the parallel region
1445       // iteration, a "continues humongous" region might be visited
1446       // before its associated "starts humongous".
1447       return false;
1448     }
1449 
1450     HeapWord* ntams = hr->next_top_at_mark_start();
1451     HeapWord* start = hr->bottom();
1452 
1453     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1454            err_msg("Preconditions not met - "
1455                    "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1456                    p2i(start), p2i(ntams), p2i(hr->end())));
1457 
1458     // Find the first marked object at or after "start".
1459     start = _bm->getNextMarkedWordAddress(start, ntams);
1460 
1461     size_t marked_bytes = 0;
1462 
1463     while (start < ntams) {
1464       oop obj = oop(start);
1465       int obj_sz = obj->size();
1466       HeapWord* obj_end = start + obj_sz;
1467 
1468       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1469       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1470 
1471       // Note: if we're looking at the last region in heap - obj_end
1472       // could be actually just beyond the end of the heap; end_idx
1473       // will then correspond to a (non-existent) card that is also
1474       // just beyond the heap.
1475       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {


1752     // Mark the allocated-since-marking portion...
1753     if (ntams < top) {
1754       // This definitely means the region has live objects.
1755       set_bit_for_region(hr);
1756 
1757       // Now set the bits in the card bitmap for [ntams, top)
1758       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1759       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1760 
1761       // Note: if we're looking at the last region in heap - top
1762       // could be actually just beyond the end of the heap; end_idx
1763       // will then correspond to a (non-existent) card that is also
1764       // just beyond the heap.
1765       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1766         // end of object is not card aligned - increment to cover
1767         // all the cards spanned by the object
1768         end_idx += 1;
1769       }
1770 
1771       assert(end_idx <= _card_bm->size(),
1772              err_msg("oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1773                      end_idx, _card_bm->size()));
1774       assert(start_idx < _card_bm->size(),
1775              err_msg("oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1776                      start_idx, _card_bm->size()));
1777 
1778       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1779     }
1780 
1781     // Set the bit for the region if it contains live data
1782     if (hr->next_marked_bytes() > 0) {
1783       set_bit_for_region(hr);
1784     }
1785 
1786     return false;
1787   }
1788 };
1789 
1790 class G1ParFinalCountTask: public AbstractGangTask {
1791 protected:
1792   G1CollectedHeap* _g1h;
1793   ConcurrentMark* _cm;
1794   BitMap* _actual_region_bm;
1795   BitMap* _actual_card_bm;


2260   int             _ref_counter_limit;
2261   int             _ref_counter;
2262   bool            _is_serial;
2263  public:
2264   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2265     _cm(cm), _task(task), _is_serial(is_serial),
2266     _ref_counter_limit(G1RefProcDrainInterval) {
2267     assert(_ref_counter_limit > 0, "sanity");
2268     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2269     _ref_counter = _ref_counter_limit;
2270   }
2271 
  // Both oop widths funnel into the templated do_oop_work() below.
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
2274 
2275   template <class T> void do_oop_work(T* p) {
2276     if (!_cm->has_overflown()) {
2277       oop obj = oopDesc::load_decode_heap_oop(p);
2278       if (_cm->verbose_high()) {
2279         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2280                                "*" PTR_FORMAT " = " PTR_FORMAT,
2281                                _task->worker_id(), p2i(p), p2i((void*) obj));
2282       }
2283 
2284       _task->deal_with_reference(obj);
2285       _ref_counter--;
2286 
2287       if (_ref_counter == 0) {
2288         // We have dealt with _ref_counter_limit references, pushing them
2289         // and objects reachable from them on to the local stack (and
2290         // possibly the global stack). Call CMTask::do_marking_step() to
2291         // process these entries.
2292         //
2293         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2294         // there's nothing more to do (i.e. we're done with the entries that
2295         // were pushed as a result of the CMTask::deal_with_reference() calls
2296         // above) or we overflow.
2297         //
2298         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2299         // flag while there may still be some work to do. (See the comment at
2300         // the beginning of CMTask::do_marking_step() for those conditions -


3001     //      it will skip the subsequent CH regions).
3002     // If it comes across a region that suddenly becomes CH, the
3003     // scenario will be similar to b). So, the race between
3004     // claim_region() and a humongous object allocation might force us
3005     // to do a bit of unnecessary work (due to some unnecessary bitmap
3006     // iterations) but it should not introduce and correctness issues.
3007     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
3008 
3009     // Above heap_region_containing_raw may return NULL as we always scan claim
3010     // until the end of the heap. In this case, just jump to the next region.
3011     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
3012 
3013     // Is the gap between reading the finger and doing the CAS too long?
3014     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
3015     if (res == finger && curr_region != NULL) {
3016       // we succeeded
3017       HeapWord*   bottom        = curr_region->bottom();
3018       HeapWord*   limit         = curr_region->next_top_at_mark_start();
3019 
3020       if (verbose_low()) {
3021         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
3022                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
3023                                "limit = " PTR_FORMAT,
3024                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
3025       }
3026 
3027       // notice that _finger == end cannot be guaranteed here since,
3028       // someone else might have moved the finger even further
3029       assert(_finger >= end, "the finger should have moved forward");
3030 
3031       if (verbose_low()) {
3032         gclog_or_tty->print_cr("[%u] we were successful with region = "
3033                                PTR_FORMAT, worker_id, p2i(curr_region));
3034       }
3035 
3036       if (limit > bottom) {
3037         if (verbose_low()) {
3038           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
3039                                  "returning it ", worker_id, p2i(curr_region));
3040         }
3041         return curr_region;
3042       } else {
3043         assert(limit == bottom,
3044                "the region limit should be at bottom");
3045         if (verbose_low()) {
3046           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
3047                                  "returning NULL", worker_id, p2i(curr_region));
3048         }
3049         // we return NULL and the caller should try calling
3050         // claim_region() again.
3051         return NULL;
3052       }
3053     } else {
3054       assert(_finger > finger, "the finger should have moved forward");
3055       if (verbose_low()) {
3056         if (curr_region == NULL) {
3057           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
3058                                  "global finger = " PTR_FORMAT ", "
3059                                  "our finger = " PTR_FORMAT,
3060                                  worker_id, p2i(_finger), p2i(finger));
3061         } else {
3062           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3063                                  "global finger = " PTR_FORMAT ", "
3064                                  "our finger = " PTR_FORMAT,
3065                                  worker_id, p2i(_finger), p2i(finger));
3066         }
3067       }
3068 
3069       // read it again
3070       finger = _finger;
3071     }
3072   }
3073 
3074   return NULL;
3075 }
3076 
3077 #ifndef PRODUCT
// Tells VerifyNoCSetOopsClosure which data structure is being scanned;
// see phase_str() for the label used in failure messages.
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,    // reported as "Stack" in failure messages
  VerifyNoCSetOopsQueues    // reported as "Queue" in failure messages
};
3082 
3083 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
3084 private:
3085   G1CollectedHeap* _g1h;
3086   VerifyNoCSetOopsPhase _phase;
3087   int _info;
3088 
3089   const char* phase_str() {
3090     switch (_phase) {
3091     case VerifyNoCSetOopsStack:         return "Stack";
3092     case VerifyNoCSetOopsQueues:        return "Queue";
3093     default:                            ShouldNotReachHere();
3094     }
3095     return NULL;
3096   }
3097 
  // Fails with a descriptive message if obj lies in the collection
  // set; _phase/_info identify where the stray oop was found.
  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }
3103 
3104 public:
3105   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3106 
  // Selects which structure subsequent callbacks are verifying.
  // info: extra context for failure messages (e.g. a worker id);
  // -1 when there is none.
  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
    _phase = phase;
    _info = info;
  }
3111 
  // Decode the full-width oop and verify it is outside the CSet.
  virtual void do_oop(oop* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_object_work(obj);
  }
3116 
3117   virtual void do_oop(narrowOop* p) {
3118     // We should not come across narrow oops while scanning marking
3119     // stacks
3120     ShouldNotReachHere();


3141   for (uint i = 0; i < _max_worker_id; i += 1) {
3142     cl.set_phase(VerifyNoCSetOopsQueues, i);
3143     CMTaskQueue* queue = _task_queues->queue(i);
3144     queue->oops_do(&cl);
3145   }
3146 
3147   // Verify the global finger
3148   HeapWord* global_finger = finger();
3149   if (global_finger != NULL && global_finger < _heap_end) {
3150     // The global finger always points to a heap region boundary. We
3151     // use heap_region_containing_raw() to get the containing region
3152     // given that the global finger could be pointing to a free region
3153     // which subsequently becomes continues humongous. If that
3154     // happens, heap_region_containing() will return the bottom of the
3155     // corresponding starts humongous region and the check below will
3156     // not hold any more.
3157     // Since we always iterate over all regions, we might get a NULL HeapRegion
3158     // here.
3159     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3160     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3161               err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
3162                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3163   }
3164 
3165   // Verify the task fingers
3166   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3167   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3168     CMTask* task = _tasks[i];
3169     HeapWord* task_finger = task->finger();
3170     if (task_finger != NULL && task_finger < _heap_end) {
3171       // See above note on the global finger verification.
3172       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3173       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3174                 !task_hr->in_collection_set(),
3175                 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
3176                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3177     }
3178   }
3179 }
3180 #endif // PRODUCT
3181 
3182 // Aggregate the counting data that was constructed concurrently
3183 // with marking.
3184 class AggregateCountDataHRClosure: public HeapRegionClosure {
3185   G1CollectedHeap* _g1h;
3186   ConcurrentMark* _cm;
3187   CardTableModRefBS* _ct_bs;
3188   BitMap* _cm_card_bm;
3189   uint _max_worker_id;
3190 
3191  public:
3192   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3193                               BitMap* cm_card_bm,
3194                               uint max_worker_id) :
3195     _g1h(g1h), _cm(g1h->concurrent_mark()),


3197     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3198 
3199   bool doHeapRegion(HeapRegion* hr) {
3200     if (hr->continuesHumongous()) {
3201       // We will ignore these here and process them when their
3202       // associated "starts humongous" region is processed.
3203       // Note that we cannot rely on their associated
3204       // "starts humongous" region to have their bit set to 1
3205       // since, due to the region chunking in the parallel region
3206       // iteration, a "continues humongous" region might be visited
3207       // before its associated "starts humongous".
3208       return false;
3209     }
3210 
3211     HeapWord* start = hr->bottom();
3212     HeapWord* limit = hr->next_top_at_mark_start();
3213     HeapWord* end = hr->end();
3214 
3215     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3216            err_msg("Preconditions not met - "
3217                    "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
3218                    "top: " PTR_FORMAT ", end: " PTR_FORMAT,
3219                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3220 
3221     assert(hr->next_marked_bytes() == 0, "Precondition");
3222 
3223     if (start == limit) {
3224       // NTAMS of this region has not been set so nothing to do.
3225       return false;
3226     }
3227 
3228     // 'start' should be in the heap.
3229     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3230     // 'end' *may* be just beyone the end of the heap (if hr is the last region)
3231     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3232 
3233     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3234     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3235     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3236 
3237     // If ntams is not card aligned then we bump card bitmap index
3238     // for limit so that we get the all the cards spanned by


3477   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3478   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3479 }
3480 
3481 // We take a break if someone is trying to stop the world.
3482 bool ConcurrentMark::do_yield_check(uint worker_id) {
3483   if (SuspendibleThreadSet::should_yield()) {
3484     if (worker_id == 0) {
3485       _g1h->g1_policy()->record_concurrent_pause();
3486     }
3487     SuspendibleThreadSet::yield();
3488     return true;
3489   } else {
3490     return false;
3491   }
3492 }
3493 
3494 #ifndef PRODUCT
3495 // for debugging purposes
3496 void ConcurrentMark::print_finger() {
3497   gclog_or_tty->print_cr("heap [" PTR_FORMAT ", " PTR_FORMAT "), global finger = " PTR_FORMAT,
3498                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3499   for (uint i = 0; i < _max_worker_id; ++i) {
3500     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3501   }
3502   gclog_or_tty->cr();
3503 }
3504 #endif
3505 
3506 template<bool scan>
3507 inline void CMTask::process_grey_object(oop obj) {
3508   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3509   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3510 
3511   if (_cm->verbose_high()) {
3512     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3513                            _worker_id, p2i((void*) obj));
3514   }
3515 
3516   size_t obj_size = obj->size();
3517   _words_scanned += obj_size;


3562 
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  // _ref_processor is expected to arrive NULL (presumably initialized
  // by the closure base class -- TODO confirm against its definition).
  assert(_ref_processor == NULL, "should be initialized to NULL");

  // Wire up the concurrent-marking reference processor only when the
  // flag enables it; otherwise _ref_processor stays NULL.
  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}
3574 
3575 void CMTask::setup_for_region(HeapRegion* hr) {
3576   assert(hr != NULL,
3577         "claim_region() should have filtered out NULL regions");
3578   assert(!hr->continuesHumongous(),
3579         "claim_region() should have filtered out continues humongous regions");
3580 
3581   if (_cm->verbose_low()) {
3582     gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
3583                            _worker_id, p2i(hr));
3584   }
3585 
3586   _curr_region  = hr;
3587   _finger       = hr->bottom();
3588   update_region_limit();
3589 }
3590 
3591 void CMTask::update_region_limit() {
3592   HeapRegion* hr            = _curr_region;
3593   HeapWord* bottom          = hr->bottom();
3594   HeapWord* limit           = hr->next_top_at_mark_start();
3595 
3596   if (limit == bottom) {
3597     if (_cm->verbose_low()) {
3598       gclog_or_tty->print_cr("[%u] found an empty region "
3599                              "[" PTR_FORMAT ", " PTR_FORMAT ")",
3600                              _worker_id, p2i(bottom), p2i(limit));
3601     }
3602     // The region was collected underneath our feet.
3603     // We set the finger to bottom to ensure that the bitmap
3604     // iteration that will follow this will not do anything.
3605     // (this is not a condition that holds when we set the region up,
3606     // as the region is not supposed to be empty in the first place)
3607     _finger = bottom;
3608   } else if (limit >= _region_limit) {
3609     assert(limit >= _finger, "peace of mind");
3610   } else {
3611     assert(limit < _region_limit, "only way to get here");
3612     // This can happen under some pretty unusual circumstances.  An
3613     // evacuation pause empties the region underneath our feet (NTAMS
3614     // at bottom). We then do some allocation in the region (NTAMS
3615     // stays at bottom), followed by the region being used as a GC
3616     // alloc region (NTAMS will move to top() and the objects
3617     // originally below it will be grayed). All objects now marked in
3618     // the region are explicitly grayed, if below the global finger,
3619     // and we do not need in fact to scan anything else. So, we simply
3620     // set _finger to be limit to ensure that the bitmap iteration
3621     // doesn't do anything.
3622     _finger = limit;
3623   }
3624 
3625   _region_limit = limit;
3626 }
3627 
// Releases the region this task is currently scanning; the task must
// actually hold one.
void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region " PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}
3636 
3637 void CMTask::clear_region_fields() {
3638   // Values for these three fields that indicate that we're not
3639   // holding on to a region.
3640   _curr_region   = NULL;
3641   _finger        = NULL;
3642   _region_limit  = NULL;
3643 }
3644 
3645 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3646   if (cm_oop_closure == NULL) {
3647     assert(_cm_oop_closure != NULL, "invariant");
3648   } else {
3649     assert(_cm_oop_closure == NULL, "invariant");
3650   }
3651   _cm_oop_closure = cm_oop_closure;


3733     return;
3734   }
3735 
3736   double curr_time_ms = os::elapsedVTime() * 1000.0;
3737 
3738   // (3) If marking stats are enabled, then we update the step history.
3739 #if _MARKING_STATS_
3740   if (_words_scanned >= _words_scanned_limit) {
3741     ++_clock_due_to_scanning;
3742   }
3743   if (_refs_reached >= _refs_reached_limit) {
3744     ++_clock_due_to_marking;
3745   }
3746 
3747   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3748   _interval_start_time_ms = curr_time_ms;
3749   _all_clock_intervals_ms.add(last_interval_ms);
3750 
3751   if (_cm->verbose_medium()) {
3752       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3753                         "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
3754                         _worker_id, last_interval_ms,
3755                         _words_scanned,
3756                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3757                         _refs_reached,
3758                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3759   }
3760 #endif // _MARKING_STATS_
3761 
3762   // (4) We check whether we should yield. If we have to, then we abort.
3763   if (SuspendibleThreadSet::should_yield()) {
3764     // We should yield. To do this we abort the task. The caller is
3765     // responsible for yielding.
3766     set_has_aborted();
3767     statsOnly( ++_aborted_yield );
3768     return;
3769   }
3770 
3771   // (5) We check whether we've reached our time quota. If we have,
3772   // then we abort.
3773   double elapsed_time_ms = curr_time_ms - _start_time_ms;


3902   // of things to do) or totally (at the very end).
3903   size_t target_size;
3904   if (partially) {
3905     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3906   } else {
3907     target_size = 0;
3908   }
3909 
3910   if (_task_queue->size() > target_size) {
3911     if (_cm->verbose_high()) {
3912       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3913                              _worker_id, target_size);
3914     }
3915 
3916     oop obj;
3917     bool ret = _task_queue->pop_local(obj);
3918     while (ret) {
3919       statsOnly( ++_local_pops );
3920 
3921       if (_cm->verbose_high()) {
3922         gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
3923                                p2i((void*) obj));
3924       }
3925 
3926       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3927       assert(!_g1h->is_on_master_free_list(
3928                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3929 
3930       scan_object(obj);
3931 
3932       if (_task_queue->size() <= target_size || has_aborted()) {
3933         ret = false;
3934       } else {
3935         ret = _task_queue->pop_local(obj);
3936       }
3937     }
3938 
3939     if (_cm->verbose_high()) {
3940       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3941                              _worker_id, _task_queue->size());
3942     }


4255       // This means that we're already holding on to a region.
4256       assert(_finger != NULL, "if region is not NULL, then the finger "
4257              "should not be NULL either");
4258 
4259       // We might have restarted this task after an evacuation pause
4260       // which might have evacuated the region we're holding on to
4261       // underneath our feet. Let's read its limit again to make sure
4262       // that we do not iterate over a region of the heap that
4263       // contains garbage (update_region_limit() will also move
4264       // _finger to the start of the region if it is found empty).
4265       update_region_limit();
4266       // We will start from _finger not from the start of the region,
4267       // as we might be restarting this task after aborting half-way
4268       // through scanning this region. In this case, _finger points to
4269       // the address where we last found a marked object. If this is a
4270       // fresh region, _finger points to start().
4271       MemRegion mr = MemRegion(_finger, _region_limit);
4272 
4273       if (_cm->verbose_low()) {
4274         gclog_or_tty->print_cr("[%u] we're scanning part "
4275                                "[" PTR_FORMAT ", " PTR_FORMAT ") "
4276                                "of region " HR_FORMAT,
4277                                _worker_id, p2i(_finger), p2i(_region_limit),
4278                                HR_FORMAT_PARAMS(_curr_region));
4279       }
4280 
4281       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4282              "humongous regions should go around loop once only");
4283 
4284       // Some special cases:
4285       // If the memory region is empty, we can just give up the region.
4286       // If the current region is humongous then we only need to check
4287       // the bitmap for the bit associated with the start of the object,
4288       // scan the object if it's live, and give up the region.
4289       // Otherwise, let's iterate over the bitmap of the part of the region
4290       // that is left.
4291       // If the iteration is successful, give up the region.
4292       if (mr.is_empty()) {
4293         giveup_current_region();
4294         regular_clock_call();
4295       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4296         if (_nextMarkBitMap->isMarked(mr.start())) {


4343     // return NULL with potentially more regions available for
4344     // claiming and why we have to check out_of_regions() to determine
4345     // whether we're done or not.
4346     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4347       // We are going to try to claim a new region. We should have
4348       // given up on the previous one.
4349       // Separated the asserts so that we know which one fires.
4350       assert(_curr_region  == NULL, "invariant");
4351       assert(_finger       == NULL, "invariant");
4352       assert(_region_limit == NULL, "invariant");
4353       if (_cm->verbose_low()) {
4354         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4355       }
4356       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4357       if (claimed_region != NULL) {
4358         // Yes, we managed to claim one
4359         statsOnly( ++_regions_claimed );
4360 
4361         if (_cm->verbose_low()) {
4362           gclog_or_tty->print_cr("[%u] we successfully claimed "
4363                                  "region " PTR_FORMAT,
4364                                  _worker_id, p2i(claimed_region));
4365         }
4366 
4367         setup_for_region(claimed_region);
4368         assert(_curr_region == claimed_region, "invariant");
4369       }
4370       // It is important to call the regular clock here. It might take
4371       // a while to claim a region if, for example, we hit a large
4372       // block of empty regions. So we need to call the regular clock
4373       // method once round the loop to make sure it's called
4374       // frequently enough.
4375       regular_clock_call();
4376     }
4377 
4378     if (!has_aborted() && _curr_region == NULL) {
4379       assert(_cm->out_of_regions(),
4380              "at this point we should be out of regions");
4381     }
4382   } while ( _curr_region != NULL && !has_aborted());
4383 


4404   // Attempt at work stealing from other task's queues.
4405   if (do_stealing && !has_aborted()) {
4406     // We have not aborted. This means that we have finished all that
4407     // we could. Let's try to do some stealing...
4408 
4409     // We cannot check whether the global stack is empty, since other
4410     // tasks might be pushing objects to it concurrently.
4411     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4412            "only way to reach here");
4413 
4414     if (_cm->verbose_low()) {
4415       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4416     }
4417 
4418     while (!has_aborted()) {
4419       oop obj;
4420       statsOnly( ++_steal_attempts );
4421 
4422       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4423         if (_cm->verbose_medium()) {
4424           gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
4425                                  _worker_id, p2i((void*) obj));
4426         }
4427 
4428         statsOnly( ++_steals );
4429 
4430         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4431                "any stolen object should be marked");
4432         scan_object(obj);
4433 
4434         // And since we're towards the end, let's totally drain the
4435         // local queue and global stack.
4436         drain_local_queue(false);
4437         drain_global_stack(false);
4438       } else {
4439         break;
4440       }
4441     }
4442   }
4443 
4444   // If we are about to wrap up and go into termination, check if we


4612   guarantee(task_queue != NULL, "invariant");
4613   guarantee(task_queues != NULL, "invariant");
4614 
4615   statsOnly( _clock_due_to_scanning = 0;
4616              _clock_due_to_marking  = 0 );
4617 
4618   _marking_step_diffs_ms.add(0.5);
4619 }
4620 
4621 // These are formatting macros that are used below to ensure
4622 // consistent formatting. The *_H_* versions are used to format the
4623 // header for a particular value and they should be kept consistent
4624 // with the corresponding macro. Also note that most of the macros add
4625 // the necessary white space (as a prefix) which makes them a bit
4626 // easier to compose.
4627 
4628 // All the output lines are prefixed with this string to be able to
4629 // identify them easily in a large log file.
4630 #define G1PPRL_LINE_PREFIX            "###"
4631 
4632 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
4633 #ifdef _LP64
4634 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4635 #else // _LP64
4636 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4637 #endif // _LP64
4638 
4639 // For per-region info
4640 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4641 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4642 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
4643 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4644 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4645 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4646 
4647 // For summary info
4648 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
4649 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
4650 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
4651 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
4652 
4653 G1PrintRegionLivenessInfoClosure::
4654 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4655   : _out(out),
4656     _total_used_bytes(0), _total_capacity_bytes(0),
4657     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4658     _hum_used_bytes(0), _hum_capacity_bytes(0),
4659     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4660     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4661   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4662   MemRegion g1_reserved = g1h->g1_reserved();
4663   double now = os::elapsedTime();
4664 
4665   // Print the header of the output.
4666   _out->cr();
4667   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4668   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4669                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4670                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4671                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),


< prev index next >