src/share/vm/gc/g1/g1CollectedHeap.cpp

 584 
 585   // We should only get here after the first-level allocation attempt
 586   // (attempt_allocation()) failed to allocate.
 587 
 588   // We will loop until a) we manage to successfully perform the
 589   // allocation or b) we successfully schedule a collection which
 590   // fails to perform the allocation. b) is the only case when we'll
 591   // return NULL.
 592   HeapWord* result = NULL;
 593   for (int try_count = 1; /* we'll return */; try_count += 1) {
 594     bool should_try_gc;
 595     uint gc_count_before;
 596 
 597     {
 598       MutexLockerEx x(Heap_lock);
 599       result = _allocator->attempt_allocation_locked(word_size, context);
 600       if (result != NULL) {
 601         return result;
 602       }
 603 
 604       if (GC_locker::is_active_and_needs_gc()) {
 605         if (g1_policy()->can_expand_young_list()) {
 606           // No need for an ergo verbose message here,
 607           // can_expand_young_list() does this when it returns true.
 608           result = _allocator->attempt_allocation_force(word_size, context);
 609           if (result != NULL) {
 610             return result;
 611           }
 612         }
 613         should_try_gc = false;
 614       } else {
 615         // The GCLocker may not be active but the GCLocker initiated
 616         // GC may not yet have been performed (GCLocker::needs_gc()
 617         // returns true). In this case we do not try this GC and
 618         // wait until the GCLocker initiated GC is performed, and
 619         // then retry the allocation.
 620         if (GC_locker::needs_gc()) {
 621           should_try_gc = false;
 622         } else {
 623           // Read the GC count while still holding the Heap_lock.
 624           gc_count_before = total_collections();
 625           should_try_gc = true;
 626         }
 627       }
 628     }
 629 
 630     if (should_try_gc) {
 631       bool succeeded;
 632       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 633                                    GCCause::_g1_inc_collection_pause);
 634       if (result != NULL) {
 635         assert(succeeded, "only way to get back a non-NULL result");
 636         return result;
 637       }
 638 
 639       if (succeeded) {
 640         // If we get here we successfully scheduled a collection which
 641         // failed to allocate. No point in trying to allocate
 642         // further. We'll just return NULL.
 643         MutexLockerEx x(Heap_lock);
 644         *gc_count_before_ret = total_collections();
 645         return NULL;
 646       }
 647     } else {
 648       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 649         MutexLockerEx x(Heap_lock);
 650         *gc_count_before_ret = total_collections();
 651         return NULL;
 652       }
 653       // The GCLocker is either active or the GCLocker initiated
 654       // GC has not yet been performed. Stall until it is and
 655       // then retry the allocation.
 656       GC_locker::stall_until_clear();
 657       (*gclocker_retry_count_ret) += 1;
 658     }
 659 
 660     // We can reach here if we were unsuccessful in scheduling a
 661     // collection (because another thread beat us to it) or if we were
 662     // stalled due to the GC locker. In either case we should retry the
 663     // allocation attempt in case another thread successfully
 664     // performed a collection and reclaimed enough space. We do the
 665     // first attempt (without holding the Heap_lock) here and the
 666     // follow-on attempt will be at the start of the next loop
 667     // iteration (after taking the Heap_lock).
 668     result = _allocator->attempt_allocation(word_size, context);
 669     if (result != NULL) {
 670       return result;
 671     }
 672 
 673     // Give a warning if we seem to be looping forever.
 674     if ((QueuedAllocationWarningCount > 0) &&
 675         (try_count % QueuedAllocationWarningCount == 0)) {
 676       warning("G1CollectedHeap::attempt_allocation_slow() "


1011   // fails to perform the allocation. b) is the only case when we'll
1012   // return NULL.
1013   HeapWord* result = NULL;
1014   for (int try_count = 1; /* we'll return */; try_count += 1) {
1015     bool should_try_gc;
1016     uint gc_count_before;
1017 
1018     {
1019       MutexLockerEx x(Heap_lock);
1020 
1021       // Given that humongous objects are not allocated in young
1022       // regions, we'll first try to do the allocation without doing a
1023       // collection hoping that there's enough space in the heap.
1024       result = humongous_obj_allocate(word_size, AllocationContext::current());
1025       if (result != NULL) {
1026         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
1027         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
1028         return result;
1029       }
1030 
1031       if (GC_locker::is_active_and_needs_gc()) {
1032         should_try_gc = false;
1033       } else {
1034         // The GCLocker may not be active but the GCLocker initiated
1035         // GC may not yet have been performed (GCLocker::needs_gc()
1036         // returns true). In this case we do not try this GC and
1037         // wait until the GCLocker initiated GC is performed, and
1038         // then retry the allocation.
1039         if (GC_locker::needs_gc()) {
1040           should_try_gc = false;
1041         } else {
1042           // Read the GC count while still holding the Heap_lock.
1043           gc_count_before = total_collections();
1044           should_try_gc = true;
1045         }
1046       }
1047     }
1048 
1049     if (should_try_gc) {
1050       // If we failed to allocate the humongous object, we should try to
1051       // do a collection pause (if we're allowed) in case it reclaims
1052       // enough space for the allocation to succeed after the pause.
1053 
1054       bool succeeded;
1055       result = do_collection_pause(word_size, gc_count_before, &succeeded,
1056                                    GCCause::_g1_humongous_allocation);
1057       if (result != NULL) {
1058         assert(succeeded, "only way to get back a non-NULL result");
1059         return result;
1060       }
1061 
1062       if (succeeded) {
1063         // If we get here we successfully scheduled a collection which
1064         // failed to allocate. No point in trying to allocate
1065         // further. We'll just return NULL.
1066         MutexLockerEx x(Heap_lock);
1067         *gc_count_before_ret = total_collections();
1068         return NULL;
1069       }
1070     } else {
1071       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1072         MutexLockerEx x(Heap_lock);
1073         *gc_count_before_ret = total_collections();
1074         return NULL;
1075       }
1076       // The GCLocker is either active or the GCLocker initiated
1077       // GC has not yet been performed. Stall until it is and
1078       // then retry the allocation.
1079       GC_locker::stall_until_clear();
1080       (*gclocker_retry_count_ret) += 1;
1081     }
1082 
1083     // We can reach here if we were unsuccessful in scheduling a
1084     // collection (because another thread beat us to it) or if we were
1085     // stalled due to the GC locker. In either case we should retry the
1086     // allocation attempt in case another thread successfully
1087     // performed a collection and reclaimed enough space.  Give a
1088     // warning if we seem to be looping forever.
1089 
1090     if ((QueuedAllocationWarningCount > 0) &&
1091         (try_count % QueuedAllocationWarningCount == 0)) {
1092       warning("G1CollectedHeap::attempt_allocation_humongous() "
1093               "retries %d times", try_count);
1094     }
1095   }
1096 
1097   ShouldNotReachHere();
1098   return NULL;
1099 }


1194     _hr_printer->post_compaction(hr);
1195     return false;
1196   }
1197 
1198   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1199     : _hr_printer(hr_printer) { }
1200 };
1201 
1202 void G1CollectedHeap::print_hrm_post_compaction() {
1203   if (_hr_printer.is_active()) {
1204     PostCompactionPrinterClosure cl(hr_printer());
1205     heap_region_iterate(&cl);
1206   }
1207 
1208 }
1209 
1210 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1211                                          bool clear_all_soft_refs) {
1212   assert_at_safepoint(true /* should_be_vm_thread */);
1213 
1214   if (GC_locker::check_active_before_gc()) {
1215     return false;
1216   }
1217 
1218   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1219   gc_timer->register_gc_start();
1220 
1221   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1222   GCIdMark gc_id_mark;
1223   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1224 
1225   SvcGCMarker sgcm(SvcGCMarker::FULL);
1226   ResourceMark rm;
1227 
1228   print_heap_before_gc();
1229   trace_heap_before_gc(gc_tracer);
1230 
1231   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1232 
1233   verify_region_sets_optional();
1234 


2379       // concurrent cycle. We're setting word_size to 0 which means that
2380       // we are not requesting a post-GC allocation.
2381       VM_G1IncCollectionPause op(gc_count_before,
2382                                  0,     /* word_size */
2383                                  true,  /* should_initiate_conc_mark */
2384                                  g1_policy()->max_pause_time_ms(),
2385                                  cause);
2386       op.set_allocation_context(AllocationContext::current());
2387 
2388       VMThread::execute(&op);
2389       if (!op.pause_succeeded()) {
2390         if (old_marking_count_before == _old_marking_cycles_started) {
2391           retry_gc = op.should_retry_gc();
2392         } else {
2393           // A Full GC happened while we were trying to schedule the
2394           // initial-mark GC. No point in starting a new cycle given
2395           // that the whole heap was collected anyway.
2396         }
2397 
2398         if (retry_gc) {
2399           if (GC_locker::is_active_and_needs_gc()) {
2400             GC_locker::stall_until_clear();
2401           }
2402         }
2403       }
2404     } else {
2405       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2406           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2407 
2408         // Schedule a standard evacuation pause. We're setting word_size
2409         // to 0 which means that we are not requesting a post-GC allocation.
2410         VM_G1IncCollectionPause op(gc_count_before,
2411                                    0,     /* word_size */
2412                                    false, /* should_initiate_conc_mark */
2413                                    g1_policy()->max_pause_time_ms(),
2414                                    cause);
2415         VMThread::execute(&op);
2416       } else {
2417         // Schedule a Full GC.
2418         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2419         VMThread::execute(&op);
2420       }


3612 void G1CollectedHeap::wait_for_root_region_scanning() {
3613   double scan_wait_start = os::elapsedTime();
3614   // We have to wait until the CM threads finish scanning the
3615   // root regions as it's the only way to ensure that all the
3616   // objects on them have been correctly scanned before we start
3617   // moving them during the GC.
3618   bool waited = _cm->root_regions()->wait_until_scan_finished();
3619   double wait_time_ms = 0.0;
3620   if (waited) {
3621     double scan_wait_end = os::elapsedTime();
3622     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3623   }
3624   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3625 }
3626 
3627 bool
3628 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3629   assert_at_safepoint(true /* should_be_vm_thread */);
3630   guarantee(!is_gc_active(), "collection is not reentrant");
3631 
3632   if (GC_locker::check_active_before_gc()) {
3633     return false;
3634   }
3635 
3636   _gc_timer_stw->register_gc_start();
3637 
3638   GCIdMark gc_id_mark;
3639   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3640 
3641   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3642   ResourceMark rm;
3643 
3644   wait_for_root_region_scanning();
3645 
3646   print_heap_before_gc();
3647   trace_heap_before_gc(_gc_tracer_stw);
3648 
3649   verify_region_sets_optional();
3650   verify_dirty_young_regions();
3651 
3652   // This call will decide whether this pause is an initial-mark




 584 
 585   // We should only get here after the first-level allocation attempt
 586   // (attempt_allocation()) failed to allocate.
 587 
 588   // We will loop until a) we manage to successfully perform the
 589   // allocation or b) we successfully schedule a collection which
 590   // fails to perform the allocation. b) is the only case when we'll
 591   // return NULL.
 592   HeapWord* result = NULL;
 593   for (int try_count = 1; /* we'll return */; try_count += 1) {
 594     bool should_try_gc;
 595     uint gc_count_before;
 596 
 597     {
 598       MutexLockerEx x(Heap_lock);
 599       result = _allocator->attempt_allocation_locked(word_size, context);
 600       if (result != NULL) {
 601         return result;
 602       }
 603 
 604       if (GCLocker::is_active_and_needs_gc()) {
 605         if (g1_policy()->can_expand_young_list()) {
 606           // No need for an ergo verbose message here,
 607           // can_expand_young_list() does this when it returns true.
 608           result = _allocator->attempt_allocation_force(word_size, context);
 609           if (result != NULL) {
 610             return result;
 611           }
 612         }
 613         should_try_gc = false;
 614       } else {
 615         // The GCLocker may not be active but the GCLocker initiated
 616         // GC may not yet have been performed (GCLocker::needs_gc()
 617         // returns true). In this case we do not try this GC and
 618         // wait until the GCLocker initiated GC is performed, and
 619         // then retry the allocation.
 620         if (GCLocker::needs_gc()) {
 621           should_try_gc = false;
 622         } else {
 623           // Read the GC count while still holding the Heap_lock.
 624           gc_count_before = total_collections();
 625           should_try_gc = true;
 626         }
 627       }
 628     }
 629 
 630     if (should_try_gc) {
 631       bool succeeded;
 632       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 633                                    GCCause::_g1_inc_collection_pause);
 634       if (result != NULL) {
 635         assert(succeeded, "only way to get back a non-NULL result");
 636         return result;
 637       }
 638 
 639       if (succeeded) {
 640         // If we get here we successfully scheduled a collection which
 641         // failed to allocate. No point in trying to allocate
 642         // further. We'll just return NULL.
 643         MutexLockerEx x(Heap_lock);
 644         *gc_count_before_ret = total_collections();
 645         return NULL;
 646       }
 647     } else {
 648       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 649         MutexLockerEx x(Heap_lock);
 650         *gc_count_before_ret = total_collections();
 651         return NULL;
 652       }
 653       // The GCLocker is either active or the GCLocker initiated
 654       // GC has not yet been performed. Stall until it is and
 655       // then retry the allocation.
 656       GCLocker::stall_until_clear();
 657       (*gclocker_retry_count_ret) += 1;
 658     }
 659 
 660     // We can reach here if we were unsuccessful in scheduling a
 661     // collection (because another thread beat us to it) or if we were
 662     // stalled due to the GC locker. In either case we should retry the
 663     // allocation attempt in case another thread successfully
 664     // performed a collection and reclaimed enough space. We do the
 665     // first attempt (without holding the Heap_lock) here and the
 666     // follow-on attempt will be at the start of the next loop
 667     // iteration (after taking the Heap_lock).
 668     result = _allocator->attempt_allocation(word_size, context);
 669     if (result != NULL) {
 670       return result;
 671     }
 672 
 673     // Give a warning if we seem to be looping forever.
 674     if ((QueuedAllocationWarningCount > 0) &&
 675         (try_count % QueuedAllocationWarningCount == 0)) {
 676       warning("G1CollectedHeap::attempt_allocation_slow() "

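The loop above decides between scheduling its own pause and stalling purely from GCLocker state plus a collection count snapshotted under the Heap_lock. A minimal standalone sketch of that decision, using made-up names (HeapModel, this should_try_gc signature) rather than the HotSpot API:

#include <cstdio>

// Standalone model of the stall-or-schedule decision in the loop above.
// None of these names are HotSpot APIs; they only mirror the control flow.
struct HeapModel {
  bool gclocker_active;        // stands in for GCLocker::is_active_and_needs_gc()
  bool gclocker_needs_gc;      // stands in for GCLocker::needs_gc()
  unsigned total_collections;  // stands in for total_collections()
};

// Returns true when this thread should schedule its own collection pause;
// false when it should stall until the GC-locker-initiated GC has run.
bool should_try_gc(const HeapModel& heap, unsigned* gc_count_before) {
  if (heap.gclocker_active || heap.gclocker_needs_gc) {
    // A locker-initiated GC is pending; let it do the work and retry later.
    return false;
  }
  // Snapshot the collection count "while still holding the Heap_lock" so the
  // pause request can detect a GC that slips in before it runs.
  *gc_count_before = heap.total_collections;
  return true;
}

int main() {
  HeapModel heap = { false /* gclocker_active */,
                     false /* gclocker_needs_gc */,
                     42    /* total_collections */ };
  unsigned gc_count_before = 0;
  if (should_try_gc(heap, &gc_count_before)) {
    std::printf("schedule a pause, gc_count_before = %u\n", gc_count_before);
  } else {
    std::printf("stall until the GC locker clears, then retry the allocation\n");
  }
  return 0;
}

The real code additionally bounds the number of GC-locker stalls with GCLockerRetryAllocationCount before giving up and returning NULL.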

1011   // fails to perform the allocation. b) is the only case when we'll
1012   // return NULL.
1013   HeapWord* result = NULL;
1014   for (int try_count = 1; /* we'll return */; try_count += 1) {
1015     bool should_try_gc;
1016     uint gc_count_before;
1017 
1018     {
1019       MutexLockerEx x(Heap_lock);
1020 
1021       // Given that humongous objects are not allocated in young
1022       // regions, we'll first try to do the allocation without doing a
1023       // collection hoping that there's enough space in the heap.
1024       result = humongous_obj_allocate(word_size, AllocationContext::current());
1025       if (result != NULL) {
1026         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
1027         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
1028         return result;
1029       }
1030 
1031       if (GCLocker::is_active_and_needs_gc()) {
1032         should_try_gc = false;
1033       } else {
1034         // The GCLocker may not be active but the GCLocker initiated
1035         // GC may not yet have been performed (GCLocker::needs_gc()
1036         // returns true). In this case we do not try this GC and
1037         // wait until the GCLocker initiated GC is performed, and
1038         // then retry the allocation.
1039         if (GCLocker::needs_gc()) {
1040           should_try_gc = false;
1041         } else {
1042           // Read the GC count while still holding the Heap_lock.
1043           gc_count_before = total_collections();
1044           should_try_gc = true;
1045         }
1046       }
1047     }
1048 
1049     if (should_try_gc) {
1050       // If we failed to allocate the humongous object, we should try to
1051       // do a collection pause (if we're allowed) in case it reclaims
1052       // enough space for the allocation to succeed after the pause.
1053 
1054       bool succeeded;
1055       result = do_collection_pause(word_size, gc_count_before, &succeeded,
1056                                    GCCause::_g1_humongous_allocation);
1057       if (result != NULL) {
1058         assert(succeeded, "only way to get back a non-NULL result");
1059         return result;
1060       }
1061 
1062       if (succeeded) {
1063         // If we get here we successfully scheduled a collection which
1064         // failed to allocate. No point in trying to allocate
1065         // further. We'll just return NULL.
1066         MutexLockerEx x(Heap_lock);
1067         *gc_count_before_ret = total_collections();
1068         return NULL;
1069       }
1070     } else {
1071       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1072         MutexLockerEx x(Heap_lock);
1073         *gc_count_before_ret = total_collections();
1074         return NULL;
1075       }
1076       // The GCLocker is either active or the GCLocker initiated
1077       // GC has not yet been performed. Stall until it is and
1078       // then retry the allocation.
1079       GCLocker::stall_until_clear();
1080       (*gclocker_retry_count_ret) += 1;
1081     }
1082 
1083     // We can reach here if we were unsuccessful in scheduling a
1084     // collection (because another thread beat us to it) or if we were
1085     // stalled due to the GC locker. In either case we should retry the
1086     // allocation attempt in case another thread successfully
1087     // performed a collection and reclaimed enough space.  Give a
1088     // warning if we seem to be looping forever.
1089 
1090     if ((QueuedAllocationWarningCount > 0) &&
1091         (try_count % QueuedAllocationWarningCount == 0)) {
1092       warning("G1CollectedHeap::attempt_allocation_humongous() "
1093               "retries %d times", try_count);
1094     }
1095   }
1096 
1097   ShouldNotReachHere();
1098   return NULL;
1099 }

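The bookkeeping at line 1027 charges the humongous allocation to old gen in whole regions (size_in_regions * HeapRegion::GrainBytes), not in the requested word_size. A small sketch of the round-up that implies, with assumed region geometry instead of the real HeapRegion constants:

#include <cstddef>
#include <cstdio>

// Assumed region geometry for illustration; the real values come from
// HeapRegion (GrainBytes/GrainWords) and are fixed at VM startup.
const size_t kHeapWordSize     = 8;               // bytes per HeapWord
const size_t kRegionGrainBytes = 1024 * 1024;     // pretend 1 MB regions
const size_t kRegionGrainWords = kRegionGrainBytes / kHeapWordSize;

// Same idea as humongous_obj_size_in_regions(): a humongous object always
// occupies a whole number of regions, so round the word size up.
size_t obj_size_in_regions(size_t word_size) {
  return (word_size + kRegionGrainWords - 1) / kRegionGrainWords;
}

int main() {
  size_t word_size = 300000;                      // ~2.3 MB of 8-byte words
  size_t regions   = obj_size_in_regions(word_size);
  // regions * kRegionGrainBytes is what gets reported as allocated in old,
  // not word_size * kHeapWordSize: here 3 regions, not 2.3 MB.
  std::printf("%zu regions, %zu bytes charged to old gen\n",
              regions, regions * kRegionGrainBytes);
  return 0;
}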

1194     _hr_printer->post_compaction(hr);
1195     return false;
1196   }
1197 
1198   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1199     : _hr_printer(hr_printer) { }
1200 };
1201 
1202 void G1CollectedHeap::print_hrm_post_compaction() {
1203   if (_hr_printer.is_active()) {
1204     PostCompactionPrinterClosure cl(hr_printer());
1205     heap_region_iterate(&cl);
1206   }
1207 
1208 }
1209 
1210 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1211                                          bool clear_all_soft_refs) {
1212   assert_at_safepoint(true /* should_be_vm_thread */);
1213 
1214   if (GCLocker::check_active_before_gc()) {
1215     return false;
1216   }
1217 
1218   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1219   gc_timer->register_gc_start();
1220 
1221   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1222   GCIdMark gc_id_mark;
1223   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1224 
1225   SvcGCMarker sgcm(SvcGCMarker::FULL);
1226   ResourceMark rm;
1227 
1228   print_heap_before_gc();
1229   trace_heap_before_gc(gc_tracer);
1230 
1231   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1232 
1233   verify_region_sets_optional();
1234 

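print_hrm_post_compaction() drives a closure over every region, and the closure's "return false" at line 1195 keeps the iteration going. A self-contained sketch of that closure-over-regions pattern, with illustrative types standing in for HeapRegion and HeapRegionClosure:

#include <cstdio>
#include <vector>

// Illustrative stand-ins for HeapRegion / HeapRegionClosure; not HotSpot types.
struct Region {
  int  index;
  bool humongous;
};

struct RegionClosure {
  // Returning true terminates the iteration early; returning false, as the
  // printer closure above does, means "keep going".
  virtual bool do_region(const Region& r) = 0;
  virtual ~RegionClosure() {}
};

struct PrintRegionClosure : public RegionClosure {
  virtual bool do_region(const Region& r) {
    std::printf("region %d%s\n", r.index, r.humongous ? " (humongous)" : "");
    return false;
  }
};

// Analogue of heap_region_iterate(): visit every region, let the closure
// decide whether to stop.
void region_iterate(const std::vector<Region>& regions, RegionClosure* cl) {
  for (size_t i = 0; i < regions.size(); i++) {
    if (cl->do_region(regions[i])) {
      break;
    }
  }
}

int main() {
  Region r0 = { 0, false };
  Region r1 = { 1, true  };
  Region r2 = { 2, false };
  std::vector<Region> regions;
  regions.push_back(r0);
  regions.push_back(r1);
  regions.push_back(r2);

  PrintRegionClosure cl;
  region_iterate(regions, &cl);
  return 0;
}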

2379       // concurrent cycle. We're setting word_size to 0 which means that
2380       // we are not requesting a post-GC allocation.
2381       VM_G1IncCollectionPause op(gc_count_before,
2382                                  0,     /* word_size */
2383                                  true,  /* should_initiate_conc_mark */
2384                                  g1_policy()->max_pause_time_ms(),
2385                                  cause);
2386       op.set_allocation_context(AllocationContext::current());
2387 
2388       VMThread::execute(&op);
2389       if (!op.pause_succeeded()) {
2390         if (old_marking_count_before == _old_marking_cycles_started) {
2391           retry_gc = op.should_retry_gc();
2392         } else {
2393           // A Full GC happened while we were trying to schedule the
2394           // initial-mark GC. No point in starting a new cycle given
2395           // that the whole heap was collected anyway.
2396         }
2397 
2398         if (retry_gc) {
2399           if (GCLocker::is_active_and_needs_gc()) {
2400             GCLocker::stall_until_clear();
2401           }
2402         }
2403       }
2404     } else {
2405       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2406           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2407 
2408         // Schedule a standard evacuation pause. We're setting word_size
2409         // to 0 which means that we are not requesting a post-GC allocation.
2410         VM_G1IncCollectionPause op(gc_count_before,
2411                                    0,     /* word_size */
2412                                    false, /* should_initiate_conc_mark */
2413                                    g1_policy()->max_pause_time_ms(),
2414                                    cause);
2415         VMThread::execute(&op);
2416       } else {
2417         // Schedule a Full GC.
2418         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2419         VMThread::execute(&op);
2420       }

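The !op.pause_succeeded() branch above retries the initial-mark request only when no old marking cycle started in the meantime, and stalls on the GC locker before retrying. A compact model of that decision, with invented names standing in for the VM_G1IncCollectionPause bookkeeping:

#include <cstdio>

// Illustrative model of the retry logic above; these are not HotSpot types.
struct PauseResult {
  bool pause_succeeded;
  bool should_retry_gc;
};

bool decide_retry(const PauseResult& op,
                  unsigned old_marking_count_before,
                  unsigned old_marking_cycles_started,
                  bool gclocker_active) {
  if (op.pause_succeeded) {
    return false;               // the pause ran; nothing to retry
  }
  if (old_marking_count_before != old_marking_cycles_started) {
    // A Full GC happened while the pause was being scheduled; the whole heap
    // was collected anyway, so there is no point starting a new cycle.
    return false;
  }
  bool retry = op.should_retry_gc;
  if (retry && gclocker_active) {
    // Mirrors GCLocker::stall_until_clear(): wait out the critical sections
    // before asking for the initial-mark pause again.
    std::printf("stall until the GC locker clears, then retry\n");
  }
  return retry;
}

int main() {
  PauseResult op = { false /* pause_succeeded */, true /* should_retry_gc */ };
  bool retry = decide_retry(op, 7, 7, false /* gclocker_active */);
  std::printf("retry_gc = %s\n", retry ? "true" : "false");
  return 0;
}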

3612 void G1CollectedHeap::wait_for_root_region_scanning() {
3613   double scan_wait_start = os::elapsedTime();
3614   // We have to wait until the CM threads finish scanning the
3615   // root regions as it's the only way to ensure that all the
3616   // objects on them have been correctly scanned before we start
3617   // moving them during the GC.
3618   bool waited = _cm->root_regions()->wait_until_scan_finished();
3619   double wait_time_ms = 0.0;
3620   if (waited) {
3621     double scan_wait_end = os::elapsedTime();
3622     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3623   }
3624   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3625 }
3626 
3627 bool
3628 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3629   assert_at_safepoint(true /* should_be_vm_thread */);
3630   guarantee(!is_gc_active(), "collection is not reentrant");
3631 
3632   if (GCLocker::check_active_before_gc()) {
3633     return false;
3634   }
3635 
3636   _gc_timer_stw->register_gc_start();
3637 
3638   GCIdMark gc_id_mark;
3639   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3640 
3641   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3642   ResourceMark rm;
3643 
3644   wait_for_root_region_scanning();
3645 
3646   print_heap_before_gc();
3647   trace_heap_before_gc(_gc_tracer_stw);
3648 
3649   verify_region_sets_optional();
3650   verify_dirty_young_regions();
3651 
3652   // This call will decide whether this pause is an initial-mark

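wait_for_root_region_scanning() is essentially a monitor wait on a "scanning finished" condition plus a wall-clock measurement of how long the caller actually blocked. A rough equivalent using standard C++ primitives rather than HotSpot's Monitor and concurrent-mark classes, illustrative only:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>

// Toy model of the root-region-scan handshake; not the HotSpot implementation.
struct RootRegionScan {
  std::mutex              m;
  std::condition_variable cv;
  bool                    finished = false;

  // Returns true only if the caller actually had to block, mirroring the
  // "waited" flag used above to decide whether any wait time was incurred.
  bool wait_until_scan_finished() {
    std::unique_lock<std::mutex> lock(m);
    if (finished) {
      return false;
    }
    cv.wait(lock, [this] { return finished; });
    return true;
  }

  void notify_scan_finished() {
    {
      std::lock_guard<std::mutex> g(m);
      finished = true;
    }
    cv.notify_all();
  }
};

int main() {
  RootRegionScan scan;
  scan.notify_scan_finished();           // pretend scanning already completed

  auto t0 = std::chrono::steady_clock::now();
  bool waited = scan.wait_until_scan_finished();
  double wait_time_ms = 0.0;
  if (waited) {
    wait_time_ms = std::chrono::duration<double, std::milli>(
                       std::chrono::steady_clock::now() - t0).count();
  }
  std::printf("waited=%d wait_time_ms=%.3f\n", (int)waited, wait_time_ms);
  return 0;
}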
