
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 57997 : [mq]: 8238854-remove-superfluous-alloc-checks
rev 57998 : [mq]: softmaxheapsize2


 354     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 355     // are lucky enough to find some.
 356     first = _hrm->find_contiguous_only_empty(obj_regions);
 357     if (first != G1_NO_HRM_INDEX) {
 358       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 359     }
 360   }
 361 
 362   if (first == G1_NO_HRM_INDEX) {
 363     // Policy: We could not find enough regions for the humongous object in the
 364     // free list. Look through the heap to find a mix of free and uncommitted regions.
 365     // If we find such a mix, try expansion.
 366     first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
 367     if (first != G1_NO_HRM_INDEX) {
 368       // We found something. Make sure these regions are committed, i.e. expand
 369       // the heap. Alternatively we could do a defragmentation GC.
 370       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 371                                     word_size * HeapWordSize);
 372 
 373       _hrm->expand_at(first, obj_regions, workers());
 374       policy()->record_new_heap_size(num_regions());
 375 
 376 #ifdef ASSERT
 377       for (uint i = first; i < first + obj_regions; ++i) {
 378         HeapRegion* hr = region_at(i);
 379         assert(hr->is_free(), "sanity");
 380         assert(hr->is_empty(), "sanity");
 381         assert(is_on_master_free_list(hr), "sanity");
 382       }
 383 #endif
 384       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 385     } else {
 386       // Policy: Potentially trigger a defragmentation GC.
 387     }
 388   }
 389 
 390   HeapWord* result = NULL;
 391   if (first != G1_NO_HRM_INDEX) {
 392     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 393     assert(result != NULL, "it should always return a valid result");
 394 
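
A note on the obj_regions value used throughout this hunk: it is the number of contiguous regions the humongous object needs, i.e. the requested word_size rounded up to whole regions. A minimal stand-alone sketch of that ceiling division (the region size and request below are assumed values for illustration, not taken from this code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t region_words = 4ull * 1024 * 1024 / 8;  // assumed 4 MB region, 8-byte HeapWords
      const uint64_t word_size    = 600000;                  // assumed humongous allocation request (words)
      // Round the request up to whole regions (ceiling division).
      uint64_t obj_regions = (word_size + region_words - 1) / region_words;
      std::printf("regions needed: %llu\n", (unsigned long long)obj_regions);  // prints 2
      return 0;
    }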


1354   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1355                             expand_bytes, aligned_expand_bytes);
1356 
1357   if (is_maximal_no_gc()) {
1358     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1359     return false;
1360   }
1361 
1362   double expand_heap_start_time_sec = os::elapsedTime();
1363   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1364   assert(regions_to_expand > 0, "Must expand by at least one region");
1365 
1366   uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1367   if (expand_time_ms != NULL) {
1368     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1369   }
1370 
1371   if (expanded_by > 0) {
1372     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1373     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1374     policy()->record_new_heap_size(num_regions());
1375   } else {
1376     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1377 
1378     // The expansion of the virtual storage space was unsuccessful.
1379     // Let's see if it was because we ran out of swap.
1380     if (G1ExitOnExpansionFailure &&
1381         _hrm->available() >= regions_to_expand) {
1382       // We had head room...
1383       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1384     }
1385   }
1386   return regions_to_expand > 0;
1387 }
1388 
1389 bool G1CollectedHeap::expand_single_region(uint node_index) {
1390   uint expanded_by = _hrm->expand_on_preferred_node(node_index);
1391 
1392   if (expanded_by == 0) {
1393     assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
1394     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1395     return false;
1396   }
1397 
1398   policy()->record_new_heap_size(num_regions());
1399   return true;
1400 }
1401 
1402 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1403   size_t aligned_shrink_bytes =
1404     ReservedSpace::page_align_size_down(shrink_bytes);
1405   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1406                                          HeapRegion::GrainBytes);
1407   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1408 
1409   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1410   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1411 
1412   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1413                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1414   if (num_regions_removed > 0) {
1415     policy()->record_new_heap_size(num_regions());
1416   } else {
1417     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1418   }
1419 }
1420 
1421 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1422   _verifier->verify_region_sets_optional();
1423 
1425   // We should only reach here at the end of a Full GC or during Remark, which
1426   // means we should not be holding on to any GC alloc regions. The method
1427   // below will make sure of that and do any remaining cleanup.
1427   _allocator->abandon_gc_alloc_regions();
1428 
1429   // Instead of tearing down / rebuilding the free lists here, we
1430   // could instead use the remove_all_pending() method on free_list to
1431   // remove only the ones that we need to remove.
1432   tear_down_region_sets(true /* free_list_only */);
1433   shrink_helper(shrink_bytes);
1434   rebuild_region_sets(true /* free_list_only */);
1435 
1436   _hrm->verify_optional();
1437   _verifier->verify_region_sets_optional();
1438 }
1439 
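
For the arithmetic in expand() and shrink_helper() above: by the time it is logged, an expansion request has been aligned up to whole regions, so a partly covered region still gets committed, while a shrink request ends up floored to whole regions by the integer division, so no more than the requested amount is uncommitted. A small self-contained illustration of both roundings (the region size and request sizes are assumed for the example):

    #include <cstdint>
    #include <cstdio>

    // Round x up to a power-of-two alignment.
    static uint64_t align_up_pow2(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      const uint64_t grain_bytes = 4ull * 1024 * 1024;  // assumed 4 MB region size (HeapRegion::GrainBytes stand-in)

      // Expansion: round the request up to whole regions.
      uint64_t expand_request    = 10ull * 1024 * 1024;                                       // 10 MB
      uint64_t regions_to_expand = align_up_pow2(expand_request, grain_bytes) / grain_bytes;  // 3

      // Shrinking: integer division floors the request to whole regions.
      uint64_t shrink_request    = 10ull * 1024 * 1024;             // 10 MB
      uint64_t regions_to_remove = shrink_request / grain_bytes;    // 2

      std::printf("expand %llu regions, shrink %llu regions\n",
                  (unsigned long long)regions_to_expand, (unsigned long long)regions_to_remove);
      return 0;
    }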






1440 class OldRegionSetChecker : public HeapRegionSetChecker {
1441 public:
1442   void check_mt_safety() {
1443     // Master Old Set MT safety protocol:
1444     // (a) If we're at a safepoint, operations on the master old set
1445     // should be invoked:
1446     // - by the VM thread (which will serialize them), or
1447     // - by the GC workers while holding the FreeList_lock, if we're
1448     //   at a safepoint for an evacuation pause (this lock is taken
1449     //   anyway when a GC alloc region is retired so that a new one
1450     //   is allocated from the free list), or
1451     // - by the GC workers while holding the OldSets_lock, if we're at a
1452     //   safepoint for a cleanup pause.
1453     // (b) If we're not at a safepoint, operations on the master old set
1454     // should be invoked while holding the Heap_lock.
1455 
1456     if (SafepointSynchronize::is_at_safepoint()) {
1457       guarantee(Thread::current()->is_VM_thread() ||
1458                 FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
1459                 "master old set MT safety protocol at a safepoint");


2403 }
2404 
2405 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2406 // must be equal to the humongous object limit.
2407 size_t G1CollectedHeap::max_tlab_size() const {
2408   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2409 }
2410 
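
Regarding max_tlab_size() above: G1 treats objects larger than half a region as humongous, so the maximum TLAB size works out to half a region, aligned down to the minimum object alignment (which normally leaves it unchanged). A small sketch with assumed values, purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t grain_words       = 4ull * 1024 * 1024 / 8;  // assumed 4 MB region, 8-byte HeapWords
      const uint64_t min_obj_alignment = 1;                       // assumed MinObjAlignment of one word
      uint64_t humongous_threshold = grain_words / 2;             // objects above this are humongous
      uint64_t max_tlab = (humongous_threshold / min_obj_alignment) * min_obj_alignment;  // align down
      std::printf("max TLAB: %llu words (%llu KB)\n",
                  (unsigned long long)max_tlab, (unsigned long long)(max_tlab * 8 / 1024));
      return 0;
    }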
2411 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2412   return _allocator->unsafe_max_tlab_alloc();
2413 }
2414 
2415 size_t G1CollectedHeap::max_capacity() const {
2416   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2417 }
2418 
2419 size_t G1CollectedHeap::max_reserved_capacity() const {
2420   return _hrm->max_length() * HeapRegion::GrainBytes;
2421 }
2422 




2423 jlong G1CollectedHeap::millis_since_last_gc() {
2424   // See the notes in GenCollectedHeap::millis_since_last_gc()
2425   // for more information about the implementation.
2426   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2427                   _policy->collection_pause_end_millis();
2428   if (ret_val < 0) {
2429     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2430       ". returning zero instead.", ret_val);
2431     return 0;
2432   }
2433   return ret_val;
2434 }
2435 
2436 void G1CollectedHeap::deduplicate_string(oop str) {
2437   assert(java_lang_String::is_instance(str), "invariant");
2438 
2439   if (G1StringDedup::is_enabled()) {
2440     G1StringDedup::deduplicate(str);
2441   }
2442 }


2993 
2994 void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
2995   GCIdMark gc_id_mark;
2996 
2997   SvcGCMarker sgcm(SvcGCMarker::MINOR);
2998   ResourceMark rm;
2999 
3000   policy()->note_gc_start();
3001 
3002   _gc_timer_stw->register_gc_start();
3003   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3004 
3005   wait_for_root_region_scanning();
3006 
3007   print_heap_before_gc();
3008   print_heap_regions();
3009   trace_heap_before_gc(_gc_tracer_stw);
3010 
3011   _verifier->verify_region_sets_optional();
3012   _verifier->verify_dirty_young_regions();


3013 
3014   // We should not be doing initial mark unless the conc mark thread is running
3015   if (!_cm_thread->should_terminate()) {
3016     // This call will decide whether this pause is an initial-mark
3017     // pause. If it is, in_initial_mark_gc() will return true
3018     // for the duration of this pause.
3019     policy()->decide_on_conc_mark_initiation();
3020   }
3021 
3022   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3023   assert(!collector_state()->in_initial_mark_gc() ||
3024          collector_state()->in_young_only_phase(), "sanity");
3025   // We also do not allow mixed GCs during marking.
3026   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
3027 
3028   // Record whether this pause is an initial mark. When the current
3029   // thread has completed its logging output and it's safe to signal
3030   // the CM thread, the flag's value in the policy has been reset.
3031   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
3032   if (should_start_conc_mark) {




 354     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 355     // are lucky enough to find some.
 356     first = _hrm->find_contiguous_only_empty(obj_regions);
 357     if (first != G1_NO_HRM_INDEX) {
 358       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 359     }
 360   }
 361 
 362   if (first == G1_NO_HRM_INDEX) {
 363     // Policy: We could not find enough regions for the humongous object in the
 364     // free list. Look through the heap to find a mix of free and uncommitted regions.
 365     // If we find such a mix, try expansion.
 366     first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
 367     if (first != G1_NO_HRM_INDEX) {
 368       // We found something. Make sure these regions are committed, i.e. expand
 369       // the heap. Alternatively we could do a defragmentation GC.
 370       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 371                                     word_size * HeapWordSize);
 372 
 373       _hrm->expand_at(first, obj_regions, workers());
 374       update_heap_target_size();
 375 
 376 #ifdef ASSERT
 377       for (uint i = first; i < first + obj_regions; ++i) {
 378         HeapRegion* hr = region_at(i);
 379         assert(hr->is_free(), "sanity");
 380         assert(hr->is_empty(), "sanity");
 381         assert(is_on_master_free_list(hr), "sanity");
 382       }
 383 #endif
 384       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 385     } else {
 386       // Policy: Potentially trigger a defragmentation GC.
 387     }
 388   }
 389 
 390   HeapWord* result = NULL;
 391   if (first != G1_NO_HRM_INDEX) {
 392     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 393     assert(result != NULL, "it should always return a valid result");
 394 


1354   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1355                             expand_bytes, aligned_expand_bytes);
1356 
1357   if (is_maximal_no_gc()) {
1358     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1359     return false;
1360   }
1361 
1362   double expand_heap_start_time_sec = os::elapsedTime();
1363   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1364   assert(regions_to_expand > 0, "Must expand by at least one region");
1365 
1366   uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1367   if (expand_time_ms != NULL) {
1368     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1369   }
1370 
1371   if (expanded_by > 0) {
1372     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1373     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1374     update_heap_target_size();
1375   } else {
1376     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1377 
1378     // The expansion of the virtual storage space was unsuccessful.
1379     // Let's see if it was because we ran out of swap.
1380     if (G1ExitOnExpansionFailure &&
1381         _hrm->available() >= regions_to_expand) {
1382       // We had head room...
1383       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1384     }
1385   }
1386   return regions_to_expand > 0;
1387 }
1388 
1389 bool G1CollectedHeap::expand_single_region(uint node_index) {
1390   uint expanded_by = _hrm->expand_on_preferred_node(node_index);
1391 
1392   if (expanded_by == 0) {
1393     assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
1394     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1395     return false;
1396   }
1397 
1398   update_heap_target_size();
1399   return true;
1400 }
1401 
1402 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1403   size_t aligned_shrink_bytes =
1404     ReservedSpace::page_align_size_down(shrink_bytes);
1405   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1406                                          HeapRegion::GrainBytes);
1407   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1408 
1409   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1410   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1411 
1412   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1413                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1414   if (num_regions_removed > 0) {
1415     update_heap_target_size();
1416   } else {
1417     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1418   }
1419 }
1420 
1421 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1422   _verifier->verify_region_sets_optional();
1423 
1424   // We should only reach here at the end of a Full GC or during Remark, which
1425   // means we should not be holding on to any GC alloc regions. The method
1426   // below will make sure of that and do any remaining cleanup.
1427   _allocator->abandon_gc_alloc_regions();
1428 
1429   // Instead of tearing down / rebuilding the free lists here, we
1430   // could instead use the remove_all_pending() method on free_list to
1431   // remove only the ones that we need to remove.
1432   tear_down_region_sets(true /* free_list_only */);
1433   shrink_helper(shrink_bytes);
1434   rebuild_region_sets(true /* free_list_only */);
1435 
1436   _hrm->verify_optional();
1437   _verifier->verify_region_sets_optional();
1438 }
1439 
1440 void G1CollectedHeap::update_heap_target_size() {
1441   uint soft_goal_num_regions = (soft_max_capacity() + HeapRegion::GrainBytes - 1) / HeapRegion::GrainBytes;
1442 
1443   _policy->update_heap_target_size(num_regions(), soft_goal_num_regions);
1444 }
1445 
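
The new update_heap_target_size() rounds the soft maximum capacity up to whole regions (ceiling division), so a soft goal that only partly covers a region still counts that region toward the target. A tiny stand-alone illustration of the same rounding (region size and capacity are assumed values):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t grain_bytes  = 4ull * 1024 * 1024;        // assumed 4 MB regions
      const uint64_t soft_max_cap = 100ull * 1024 * 1024 + 1;  // just over 100 MB (assumed)
      // Same rounding as above: (capacity + GrainBytes - 1) / GrainBytes.
      uint64_t soft_goal_num_regions = (soft_max_cap + grain_bytes - 1) / grain_bytes;
      std::printf("soft goal: %llu regions\n", (unsigned long long)soft_goal_num_regions);  // prints 26
      return 0;
    }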
1446 class OldRegionSetChecker : public HeapRegionSetChecker {
1447 public:
1448   void check_mt_safety() {
1449     // Master Old Set MT safety protocol:
1450     // (a) If we're at a safepoint, operations on the master old set
1451     // should be invoked:
1452     // - by the VM thread (which will serialize them), or
1453     // - by the GC workers while holding the FreeList_lock, if we're
1454     //   at a safepoint for an evacuation pause (this lock is taken
1455     //   anyway when a GC alloc region is retired so that a new one
1456     //   is allocated from the free list), or
1457     // - by the GC workers while holding the OldSets_lock, if we're at a
1458     //   safepoint for a cleanup pause.
1459     // (b) If we're not at a safepoint, operations on the master old set
1460     // should be invoked while holding the Heap_lock.
1461 
1462     if (SafepointSynchronize::is_at_safepoint()) {
1463       guarantee(Thread::current()->is_VM_thread() ||
1464                 FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
1465                 "master old set MT safety protocol at a safepoint");


2409 }
2410 
2411 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2412 // must be equal to the humongous object limit.
2413 size_t G1CollectedHeap::max_tlab_size() const {
2414   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2415 }
2416 
2417 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2418   return _allocator->unsafe_max_tlab_alloc();
2419 }
2420 
2421 size_t G1CollectedHeap::max_capacity() const {
2422   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2423 }
2424 
2425 size_t G1CollectedHeap::max_reserved_capacity() const {
2426   return _hrm->max_length() * HeapRegion::GrainBytes;
2427 }
2428 
2429 size_t G1CollectedHeap::soft_max_capacity() const {
2430   return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
2431 }
2432 
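
The new soft_max_capacity() aligns the SoftMaxHeapSize flag up to the heap alignment and then clamps it between MinHeapSize and max_capacity(). A hedged sketch of the clamping behaviour with made-up bounds; it only illustrates the clamp, not the actual flag handling:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t min_heap = 64ull * 1024 * 1024;    // assumed MinHeapSize: 64 MB
      const uint64_t max_cap  = 1024ull * 1024 * 1024;  // assumed max_capacity(): 1 GB
      const uint64_t requests[] = { 16ull * 1024 * 1024,      // below the minimum -> clamped up
                                    512ull * 1024 * 1024,     // in range          -> unchanged
                                    4096ull * 1024 * 1024 };  // above the maximum -> clamped down
      for (uint64_t r : requests) {
        uint64_t soft_max = std::min(std::max(r, min_heap), max_cap);  // clamp(r, min_heap, max_cap)
        std::printf("%llu MB -> %llu MB\n",
                    (unsigned long long)(r >> 20), (unsigned long long)(soft_max >> 20));
      }
      return 0;
    }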
2433 jlong G1CollectedHeap::millis_since_last_gc() {
2434   // See the notes in GenCollectedHeap::millis_since_last_gc()
2435   // for more information about the implementation.
2436   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2437                   _policy->collection_pause_end_millis();
2438   if (ret_val < 0) {
2439     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2440       ". returning zero instead.", ret_val);
2441     return 0;
2442   }
2443   return ret_val;
2444 }
2445 
2446 void G1CollectedHeap::deduplicate_string(oop str) {
2447   assert(java_lang_String::is_instance(str), "invariant");
2448 
2449   if (G1StringDedup::is_enabled()) {
2450     G1StringDedup::deduplicate(str);
2451   }
2452 }


3003 
3004 void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
3005   GCIdMark gc_id_mark;
3006 
3007   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3008   ResourceMark rm;
3009 
3010   policy()->note_gc_start();
3011 
3012   _gc_timer_stw->register_gc_start();
3013   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3014 
3015   wait_for_root_region_scanning();
3016 
3017   print_heap_before_gc();
3018   print_heap_regions();
3019   trace_heap_before_gc(_gc_tracer_stw);
3020 
3021   _verifier->verify_region_sets_optional();
3022   _verifier->verify_dirty_young_regions();
3023 
3024   update_heap_target_size();
3025 
3026   // We should not be doing initial mark unless the conc mark thread is running
3027   if (!_cm_thread->should_terminate()) {
3028     // This call will decide whether this pause is an initial-mark
3029     // pause. If it is, in_initial_mark_gc() will return true
3030     // for the duration of this pause.
3031     policy()->decide_on_conc_mark_initiation();
3032   }
3033 
3034   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3035   assert(!collector_state()->in_initial_mark_gc() ||
3036          collector_state()->in_young_only_phase(), "sanity");
3037   // We also do not allow mixed GCs during marking.
3038   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
3039 
3040   // Record whether this pause is an initial mark. When the current
3041   // thread has completed its logging output and it's safe to signal
3042   // the CM thread, the flag's value in the policy has been reset.
3043   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
3044   if (should_start_conc_mark) {

