
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 11970 : imported patch 8157952-parallelize-memory-pretouch

--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp

1462          "maximum_desired_capacity = " SIZE_FORMAT,
1463          minimum_desired_capacity, maximum_desired_capacity);
1464 
1465   // Should not be greater than the heap max size. No need to adjust
1466   // it with respect to the heap min size as it's a lower bound (i.e.,
1467   // we'll try to make the capacity larger than it, not smaller).
1468   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1469   // Should not be less than the heap min size. No need to adjust it
1470   // with respect to the heap max size as it's an upper bound (i.e.,
1471   // we'll try to make the capacity smaller than it, not greater).
1472   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1473 
1474   if (capacity_after_gc < minimum_desired_capacity) {
1475     // Don't expand unless it's significant
1476     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1477 
1478     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1479                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1480                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1481 
1482     expand(expand_bytes);
1483 
1484     // No expansion, now see if we want to shrink
1485   } else if (capacity_after_gc > maximum_desired_capacity) {
1486     // Capacity too large, compute shrinking size
1487     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1488 
1489     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1490                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1491                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1492 
1493     shrink(shrink_bytes);
1494   }
1495 }
1496 
1497 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1498                                                             AllocationContext_t context,
1499                                                             bool do_gc,
1500                                                             bool clear_all_soft_refs,
1501                                                             bool expect_null_mutator_alloc_region,
1502                                                             bool* gc_succeeded) {


1582   // appropriate.
1583   assert(*succeeded, "sanity");
1584   return NULL;
1585 }
1586 
1587 // Attempting to expand the heap sufficiently
1588 // to support an allocation of the given "word_size".  If
1589 // successful, perform the allocation and return the address of the
1590 // allocated block, or else "NULL".
1591 
1592 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1593   assert_at_safepoint(true /* should_be_vm_thread */);
1594 
1595   _verifier->verify_region_sets_optional();
1596 
1597   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1598   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1599                             word_size * HeapWordSize);
1600 
1601 
1602   if (expand(expand_bytes)) {
1603     _hrm.verify_optional();
1604     _verifier->verify_region_sets_optional();
1605     return attempt_allocation_at_safepoint(word_size,
1606                                            context,
1607                                            false /* expect_null_mutator_alloc_region */);
1608   }
1609   return NULL;
1610 }
1611 
1612 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1613   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1614   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1615                                        HeapRegion::GrainBytes);
1616 
1617   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1618                             expand_bytes, aligned_expand_bytes);
1619 
1620   if (is_maximal_no_gc()) {
1621     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1622     return false;
1623   }
1624 
1625   double expand_heap_start_time_sec = os::elapsedTime();
1626   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1627   assert(regions_to_expand > 0, "Must expand by at least one region");
1628 
1629   uint expanded_by = _hrm.expand_by(regions_to_expand);
1630   if (expand_time_ms != NULL) {
1631     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1632   }
1633 
1634   if (expanded_by > 0) {
1635     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1636     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1637     g1_policy()->record_new_heap_size(num_regions());
1638   } else {
1639     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1640 
1641     // The expansion of the virtual storage space was unsuccessful.
1642     // Let's see if it was because we ran out of swap.
1643     if (G1ExitOnExpansionFailure &&
1644         _hrm.available() >= regions_to_expand) {
1645       // We had head room...
1646       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1647     }
1648   }
1649   return regions_to_expand > 0;


1910 
1911   {
1912     HeapWord* start = _hrm.reserved().start();
1913     HeapWord* end = _hrm.reserved().end();
1914     size_t granularity = HeapRegion::GrainBytes;
1915 
1916     _in_cset_fast_test.initialize(start, end, granularity);
1917     _humongous_reclaim_candidates.initialize(start, end, granularity);
1918   }
1919 
1920   // Create the G1ConcurrentMark data structure and thread.
1921   // (Must do this late, so that "max_regions" is defined.)
1922   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1923   if (_cm == NULL || !_cm->completed_initialization()) {
1924     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1925     return JNI_ENOMEM;
1926   }
1927   _cmThread = _cm->cmThread();
1928 
1929   // Now expand into the initial heap size.
1930   if (!expand(init_byte_size)) {
1931     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1932     return JNI_ENOMEM;
1933   }
1934 
1935   // Perform any initialization actions delegated to the policy.
1936   g1_policy()->init(this, &_collection_set);
1937 
1938   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1939                                                SATB_Q_FL_lock,
1940                                                G1SATBProcessCompletedThreshold,
1941                                                Shared_SATB_Q_lock);
1942 
1943   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1944                                                 DirtyCardQ_CBL_mon,
1945                                                 DirtyCardQ_FL_lock,
1946                                                 (int)concurrent_g1_refine()->yellow_zone(),
1947                                                 (int)concurrent_g1_refine()->red_zone(),
1948                                                 Shared_DirtyCardQ_lock,
1949                                                 NULL,  // fl_owner
1950                                                 true); // init_free_ids


3224           // they can start working to make sure that all the
3225           // appropriate initialization is done on the CM object.
3226           concurrent_mark()->checkpointRootsInitialPost();
3227           collector_state()->set_mark_in_progress(true);
3228           // Note that we don't actually trigger the CM thread at
3229           // this point. We do that later when we're sure that
3230           // the current thread has completed its logging output.
3231         }
3232 
3233         allocate_dummy_regions();
3234 
3235         _allocator->init_mutator_alloc_region();
3236 
3237         {
3238           size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3239           if (expand_bytes > 0) {
3240             size_t bytes_before = capacity();
3241             // No need for an ergo logging here,
3242             // expansion_amount() does this when it returns a value > 0.
3243             double expand_ms;
3244             if (!expand(expand_bytes, &expand_ms)) {
3245               // We failed to expand the heap. Cannot do anything about it.
3246             }
3247             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3248           }
3249         }
3250 
3251         // We redo the verification but now wrt to the new CSet which
3252         // has just got initialized after the previous CSet was freed.
3253         _cm->verify_no_cset_oops();
3254         _cm->note_end_of_gc();
3255 
3256         // This timing is only used by the ergonomics to handle our pause target.
3257         // It is unclear why this should not include the full pause. We will
3258         // investigate this in CR 7178365.
3259         double sample_end_time_sec = os::elapsedTime();
3260         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3261         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3262         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3263 
3264         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());




1462          "maximum_desired_capacity = " SIZE_FORMAT,
1463          minimum_desired_capacity, maximum_desired_capacity);
1464 
1465   // Should not be greater than the heap max size. No need to adjust
1466   // it with respect to the heap min size as it's a lower bound (i.e.,
1467   // we'll try to make the capacity larger than it, not smaller).
1468   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1469   // Should not be less than the heap min size. No need to adjust it
1470   // with respect to the heap max size as it's an upper bound (i.e.,
1471   // we'll try to make the capacity smaller than it, not greater).
1472   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1473 
1474   if (capacity_after_gc < minimum_desired_capacity) {
1475     // Don't expand unless it's significant
1476     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1477 
1478     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1479                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1480                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1481 
1482     expand(expand_bytes, _workers);
1483 
1484     // No expansion, now see if we want to shrink
1485   } else if (capacity_after_gc > maximum_desired_capacity) {
1486     // Capacity too large, compute shrinking size
1487     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1488 
1489     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1490                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1491                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1492 
1493     shrink(shrink_bytes);
1494   }
1495 }
1496 
1497 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1498                                                             AllocationContext_t context,
1499                                                             bool do_gc,
1500                                                             bool clear_all_soft_refs,
1501                                                             bool expect_null_mutator_alloc_region,
1502                                                             bool* gc_succeeded) {
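
The computation of minimum_desired_capacity and maximum_desired_capacity sits just above this excerpt and is not shown; it follows the usual HeapFreeRatio scheme, roughly minimum_desired_capacity = used_after_gc / (1 - MinHeapFreeRatio/100) and maximum_desired_capacity = used_after_gc / (1 - MaxHeapFreeRatio/100). As a worked example with the default MinHeapFreeRatio=40 and MaxHeapFreeRatio=70: for used_after_gc = 600 MB the bounds become 600/0.60 = 1000 MB and 600/0.30 = 2000 MB, so a post-Full-GC capacity below 1000 MB takes the expansion branch above and a capacity above 2000 MB takes the shrinking branch.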


1582   // appropriate.
1583   assert(*succeeded, "sanity");
1584   return NULL;
1585 }
1586 
1587 // Attempting to expand the heap sufficiently
1588 // to support an allocation of the given "word_size".  If
1589 // successful, perform the allocation and return the address of the
1590 // allocated block, or else "NULL".
1591 
1592 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1593   assert_at_safepoint(true /* should_be_vm_thread */);
1594 
1595   _verifier->verify_region_sets_optional();
1596 
1597   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1598   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1599                             word_size * HeapWordSize);
1600 
1601 
1602   if (expand(expand_bytes, _workers)) {
1603     _hrm.verify_optional();
1604     _verifier->verify_region_sets_optional();
1605     return attempt_allocation_at_safepoint(word_size,
1606                                            context,
1607                                            false /* expect_null_mutator_alloc_region */);
1608   }
1609   return NULL;
1610 }
1611 
1612 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1613   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1614   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1615                                        HeapRegion::GrainBytes);
1616 
1617   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",

1618                             expand_bytes, aligned_expand_bytes);
1619 
1620   if (is_maximal_no_gc()) {
1621     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1622     return false;
1623   }
1624 
1625   double expand_heap_start_time_sec = os::elapsedTime();
1626   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1627   assert(regions_to_expand > 0, "Must expand by at least one region");
1628 
1629   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1630   if (expand_time_ms != NULL) {
1631     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1632   }
1633 
1634   if (expanded_by > 0) {
1635     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1636     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1637     g1_policy()->record_new_heap_size(num_regions());
1638   } else {
1639     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1640 
1641     // The expansion of the virtual storage space was unsuccessful.
1642     // Let's see if it was because we ran out of swap.
1643     if (G1ExitOnExpansionFailure &&
1644         _hrm.available() >= regions_to_expand) {
1645       // We had head room...
1646       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1647     }
1648   }
1649   return regions_to_expand > 0;
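
This hunk only threads a WorkGang* through: expand() gains a pretouch_workers parameter and passes it on to HeapRegionManager::expand_by(), so that when the heap runs with -XX:+AlwaysPreTouch the page touching of newly committed regions can be spread over the gang instead of being done serially. The sizing arithmetic is unchanged: the request is page-aligned and then rounded up to whole regions, e.g. a 5 MB request with 4 MB regions becomes 8 MB, i.e. two regions. The pretouch work itself lives in lower layers that are not part of this file; the sketch below is only an illustration of what a parallel pretouch gang task can look like, assuming a simple slice-per-worker split. The class name, member names, slicing policy and the commented call site are assumptions for illustration, not the patch's actual code.

#include "gc/shared/workgroup.hpp"
#include "utilities/globalDefinitions.hpp"

// Illustrative sketch: touch one byte per page of a per-worker slice so the
// OS backs the newly committed range before the mutator uses it.
class PretouchSketchTask : public AbstractGangTask {
  char* const  _start;      // first byte of the newly committed range
  char* const  _end;        // one past the last byte of the range
  const size_t _page_size;  // page size backing the range
  const uint   _n_workers;  // number of workers the range is split across
 public:
  PretouchSketchTask(char* start, char* end, size_t page_size, uint n_workers) :
    AbstractGangTask("Pretouch sketch"),
    _start(start), _end(end), _page_size(page_size), _n_workers(n_workers) { }

  virtual void work(uint worker_id) {
    // Give each worker a contiguous, page-aligned slice of the range.
    size_t pages      = pointer_delta(_end, _start, _page_size);
    size_t per_worker = (pages + _n_workers - 1) / _n_workers;
    char*  from = _start + MIN2((size_t)worker_id * per_worker, pages) * _page_size;
    char*  to   = _start + MIN2((size_t)(worker_id + 1) * per_worker, pages) * _page_size;
    for (char* p = from; p < to; p += _page_size) {
      *p = 0;   // touching a single byte is enough to fault the page in
    }
  }
};

// Hypothetical call site somewhere below HeapRegionManager::expand_by(),
// with a serial fallback in case no gang is available:
//   uint n_workers = (pretouch_gang != NULL) ? pretouch_gang->active_workers() : 1;
//   PretouchSketchTask task(base, base + byte_size, page_size, n_workers);
//   if (pretouch_gang != NULL) {
//     pretouch_gang->run_task(&task);
//   } else {
//     task.work(0);
//   }

A real implementation would additionally have to respect the page size of the backing storage (small vs. large pages) and pick a sensible per-worker chunking granularity; those details are deliberately left out of the sketch.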


1910 
1911   {
1912     HeapWord* start = _hrm.reserved().start();
1913     HeapWord* end = _hrm.reserved().end();
1914     size_t granularity = HeapRegion::GrainBytes;
1915 
1916     _in_cset_fast_test.initialize(start, end, granularity);
1917     _humongous_reclaim_candidates.initialize(start, end, granularity);
1918   }
1919 
1920   // Create the G1ConcurrentMark data structure and thread.
1921   // (Must do this late, so that "max_regions" is defined.)
1922   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1923   if (_cm == NULL || !_cm->completed_initialization()) {
1924     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1925     return JNI_ENOMEM;
1926   }
1927   _cmThread = _cm->cmThread();
1928 
1929   // Now expand into the initial heap size.
1930   if (!expand(init_byte_size, _workers)) {
1931     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1932     return JNI_ENOMEM;
1933   }
1934 
1935   // Perform any initialization actions delegated to the policy.
1936   g1_policy()->init(this, &_collection_set);
1937 
1938   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1939                                                SATB_Q_FL_lock,
1940                                                G1SATBProcessCompletedThreshold,
1941                                                Shared_SATB_Q_lock);
1942 
1943   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1944                                                 DirtyCardQ_CBL_mon,
1945                                                 DirtyCardQ_FL_lock,
1946                                                 (int)concurrent_g1_refine()->yellow_zone(),
1947                                                 (int)concurrent_g1_refine()->red_zone(),
1948                                                 Shared_DirtyCardQ_lock,
1949                                                 NULL,  // fl_owner
1950                                                 true); // init_free_ids
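
Because the initial heap is committed here via expand(init_byte_size, _workers), a large -Xms together with -XX:+AlwaysPreTouch is the case that gains most from handing the worker gang down, since the whole initial committed range is touched during startup. A run that exercises this path and also enables the ergonomics logging used by the log_debug(gc, ergo, heap) calls in this file might look roughly as follows (MyApp is a placeholder):

  java -XX:+UseG1GC -Xms8g -Xmx8g -XX:+AlwaysPreTouch -Xlog:gc+ergo+heap=debug MyApp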


3224           // they can start working to make sure that all the
3225           // appropriate initialization is done on the CM object.
3226           concurrent_mark()->checkpointRootsInitialPost();
3227           collector_state()->set_mark_in_progress(true);
3228           // Note that we don't actually trigger the CM thread at
3229           // this point. We do that later when we're sure that
3230           // the current thread has completed its logging output.
3231         }
3232 
3233         allocate_dummy_regions();
3234 
3235         _allocator->init_mutator_alloc_region();
3236 
3237         {
3238           size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3239           if (expand_bytes > 0) {
3240             size_t bytes_before = capacity();
3241             // No need for an ergo logging here,
3242             // expansion_amount() does this when it returns a value > 0.
3243             double expand_ms;
3244             if (!expand(expand_bytes, _workers, &expand_ms)) {
3245               // We failed to expand the heap. Cannot do anything about it.
3246             }
3247             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3248           }
3249         }
3250 
3251         // We redo the verification but now wrt to the new CSet which
3252         // has just got initialized after the previous CSet was freed.
3253         _cm->verify_no_cset_oops();
3254         _cm->note_end_of_gc();
3255 
3256         // This timing is only used by the ergonomics to handle our pause target.
3257         // It is unclear why this should not include the full pause. We will
3258         // investigate this in CR 7178365.
3259         double sample_end_time_sec = os::elapsedTime();
3260         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3261         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3262         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3263 
3264         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());

