< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 11545 : [mq]: 8159978-collection-set-as-array
rev 11546 : [mq]: 8159978-erikh-review


2401   IterateObjectClosureRegionClosure blk(cl);
2402   heap_region_iterate(&blk);
2403 }
2404 
// Serially apply cl to every committed heap region, in heap (address)
// order, by delegating to the heap region manager.
2405 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2406   _hrm.iterate(cl);
2407 }
2408 
// Parallel variant of heap_region_iterate(): each GC worker (identified by
// worker_id) uses the shared HeapRegionClaimer to claim regions, so the
// region set is partitioned among the workers.
// NOTE(review): the exact claiming/distribution semantics (and the meaning
// of 'concurrent') are defined by HeapRegionManager::par_iterate — confirm
// there.
2409 void
2410 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2411                                          uint worker_id,
2412                                          HeapRegionClaimer *hrclaimer,
2413                                          bool concurrent) const {
2414   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2415 }
2416 
// Apply cl to every region in the current collection set (delegates to
// G1CollectionSet::iterate).
2417 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2418   _collection_set.iterate(cl);
2419 }
2420 
// Apply cl to the collection set starting from a per-worker offset derived
// from worker_id; active_workers is supplied by the caller and forwarded so
// the starting points can be spread across workers.
// NOTE(review): the exact start-offset computation lives in
// G1CollectionSet::iterate_from — confirm there.
2421 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id, uint active_workers) {
2422   _collection_set.iterate_from(cl, worker_id, active_workers);
2423 }
2424 
// Return the next region after 'from' in heap order that is usable as a
// compaction target, skipping any region for which is_pinned() is true.
// Returns NULL when no further non-pinned region exists.
2425 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2426   HeapRegion* result = _hrm.next_region_in_heap(from);
2427   while (result != NULL && result->is_pinned()) {
2428     result = _hrm.next_region_in_heap(result);
2429   }
2430   return result;
2431 }
2432 
// Find the start of the block containing addr by asking the heap region
// that covers addr.
2433 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2434   HeapRegion* hr = heap_region_containing(addr);
2435   return hr->block_start(addr);
2436 }
2437 
// Return the size of the block starting at addr, delegating to the heap
// region that covers addr.
2438 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2439   HeapRegion* hr = heap_region_containing(addr);
2440   return hr->block_size(addr);
2441 }
2442 


2970   // root regions as it's the only way to ensure that all the
2971   // objects on them have been correctly scanned before we start
2972   // moving them during the GC.
2973   bool waited = _cm->root_regions()->wait_until_scan_finished();
2974   double wait_time_ms = 0.0;
2975   if (waited) {
2976     double scan_wait_end = os::elapsedTime();
2977     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2978   }
2979   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2980 }
2981 
// Region closure that reports each visited region to the heap region
// printer as a collection-set member.  Returning false means "continue
// iterating" — presumably the HeapRegionClosure abort convention; confirm
// in heapRegion.hpp.
2982 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2983 public:
2984   virtual bool doHeapRegion(HeapRegion* r) {
2985     G1CollectedHeap::heap()->hr_printer()->cset(r);
2986     return false;
2987   }
2988 };
2989 
2990 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

2991   assert_at_safepoint(true /* should_be_vm_thread */);
2992   guarantee(!is_gc_active(), "collection is not reentrant");
2993 
2994   if (GCLocker::check_active_before_gc()) {
2995     return false;
2996   }
2997 
2998   _gc_timer_stw->register_gc_start();
2999 
3000   GCIdMark gc_id_mark;
3001   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3002 
3003   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3004   ResourceMark rm;
3005 
3006   g1_policy()->note_gc_start();
3007 
3008   wait_for_root_region_scanning();
3009 
3010   print_heap_before_gc();




2401   IterateObjectClosureRegionClosure blk(cl);
2402   heap_region_iterate(&blk);
2403 }
2404 
// Serially apply cl to every committed heap region, in heap (address)
// order, by delegating to the heap region manager.
2405 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2406   _hrm.iterate(cl);
2407 }
2408 
// Parallel variant of heap_region_iterate(): each GC worker (identified by
// worker_id) uses the shared HeapRegionClaimer to claim regions, so the
// region set is partitioned among the workers.
// NOTE(review): the exact claiming/distribution semantics (and the meaning
// of 'concurrent') are defined by HeapRegionManager::par_iterate — confirm
// there.
2409 void
2410 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2411                                          uint worker_id,
2412                                          HeapRegionClaimer *hrclaimer,
2413                                          bool concurrent) const {
2414   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2415 }
2416 
// Apply cl to every region in the current collection set (delegates to
// G1CollectionSet::iterate).
2417 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2418   _collection_set.iterate(cl);
2419 }
2420 
// Apply cl to the collection set starting from a per-worker offset derived
// from worker_id.  The number of active workers is obtained here from
// workers()->active_workers() rather than being passed in by the caller,
// so callers cannot supply a stale or inconsistent worker count.
2421 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2422   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2423 }
2424 
// Return the next region after 'from' in heap order that is usable as a
// compaction target, skipping any region for which is_pinned() is true.
// Returns NULL when no further non-pinned region exists.
2425 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2426   HeapRegion* result = _hrm.next_region_in_heap(from);
2427   while (result != NULL && result->is_pinned()) {
2428     result = _hrm.next_region_in_heap(result);
2429   }
2430   return result;
2431 }
2432 
// Find the start of the block containing addr by asking the heap region
// that covers addr.
2433 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2434   HeapRegion* hr = heap_region_containing(addr);
2435   return hr->block_start(addr);
2436 }
2437 
// Return the size of the block starting at addr, delegating to the heap
// region that covers addr.
2438 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2439   HeapRegion* hr = heap_region_containing(addr);
2440   return hr->block_size(addr);
2441 }
2442 


2970   // root regions as it's the only way to ensure that all the
2971   // objects on them have been correctly scanned before we start
2972   // moving them during the GC.
2973   bool waited = _cm->root_regions()->wait_until_scan_finished();
2974   double wait_time_ms = 0.0;
2975   if (waited) {
2976     double scan_wait_end = os::elapsedTime();
2977     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2978   }
2979   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2980 }
2981 
// Region closure that reports each visited region to the heap region
// printer as a collection-set member.  Returning false means "continue
// iterating" — presumably the HeapRegionClosure abort convention; confirm
// in heapRegion.hpp.
2982 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2983 public:
2984   virtual bool doHeapRegion(HeapRegion* r) {
2985     G1CollectedHeap::heap()->hr_printer()->cset(r);
2986     return false;
2987   }
2988 };
2989 
2990 bool
2991 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2992   assert_at_safepoint(true /* should_be_vm_thread */);
2993   guarantee(!is_gc_active(), "collection is not reentrant");
2994 
2995   if (GCLocker::check_active_before_gc()) {
2996     return false;
2997   }
2998 
2999   _gc_timer_stw->register_gc_start();
3000 
3001   GCIdMark gc_id_mark;
3002   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3003 
3004   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3005   ResourceMark rm;
3006 
3007   g1_policy()->note_gc_start();
3008 
3009   wait_for_root_region_scanning();
3010 
3011   print_heap_before_gc();


< prev index next >