old/src/share/vm/gc/g1/g1CollectedHeap.cpp

 355 
 356   _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
 357 
 358   assert(words_not_fillable == 0 ||
 359          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 360          "Miscalculation in humongous allocation");
 361 
 362   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 363 
 364   for (uint i = first; i <= last; ++i) {
 365     hr = region_at(i);
 366     _humongous_set.add(hr);
 367     _hr_printer.alloc(hr);
 368   }
 369 
 370   return new_obj;
 371 }
 372 
 373 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 374   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 375   return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 376 }
 377 
 378 // If could fit into free regions w/o expansion, try.
 379 // Otherwise, if can expand, do so.
 380 // Otherwise, if using ex regions might help, try with ex given back.
 381 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 382   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 383 
 384   _verifier->verify_region_sets_optional();
 385 
 386   uint first = G1_NO_HRM_INDEX;
 387   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 388 
 389   if (obj_regions == 1) {
 390     // Only one region to allocate, try to use a fast path by directly allocating
 391     // from the free lists. Do not try to expand here, we will potentially do that
 392     // later.
 393     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 394     if (hr != NULL) {
 395       first = hr->hrm_index();


1588   assert_at_safepoint(true /* should_be_vm_thread */);
1589 
1590   _verifier->verify_region_sets_optional();
1591 
1592   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1593   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1594                             word_size * HeapWordSize);
1595 
1596 
1597   if (expand(expand_bytes, _workers)) {
1598     _hrm.verify_optional();
1599     _verifier->verify_region_sets_optional();
1600     return attempt_allocation_at_safepoint(word_size,
1601                                            context,
1602                                            false /* expect_null_mutator_alloc_region */);
1603   }
1604   return NULL;
1605 }
1606 
1607 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1608   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1609   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1610                                        HeapRegion::GrainBytes);
1611 
1612   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1613                             expand_bytes, aligned_expand_bytes);
1614 
1615   if (is_maximal_no_gc()) {
1616     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1617     return false;
1618   }
1619 
1620   double expand_heap_start_time_sec = os::elapsedTime();
1621   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1622   assert(regions_to_expand > 0, "Must expand by at least one region");
1623 
1624   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1625   if (expand_time_ms != NULL) {
1626     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1627   }
1628 
1629   if (expanded_by > 0) {
1630     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1631     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1632     g1_policy()->record_new_heap_size(num_regions());
1633   } else {
1634     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1635 
1636     // The expansion of the virtual storage space was unsuccessful.
1637     // Let's see if it was because we ran out of swap.
1638     if (G1ExitOnExpansionFailure &&
1639         _hrm.available() >= regions_to_expand) {
1640       // We had head room...
1641       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1642     }
1643   }
1644   return regions_to_expand > 0;
1645 }
1646 
1647 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1648   size_t aligned_shrink_bytes =
1649     ReservedSpace::page_align_size_down(shrink_bytes);
1650   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1651                                          HeapRegion::GrainBytes);
1652   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1653 
1654   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1655   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1656 
1657 
1658   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1659                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1660   if (num_regions_removed > 0) {
1661     g1_policy()->record_new_heap_size(num_regions());
1662   } else {
1663     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1664   }
1665 }
1666 
1667 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1668   _verifier->verify_region_sets_optional();
1669 
1670   // We should only reach here at the end of a Full GC which means we


2418 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2419   HeapRegion* hr = heap_region_containing(addr);
2420   return hr->block_is_obj(addr);
2421 }
2422 
2423 bool G1CollectedHeap::supports_tlab_allocation() const {
2424   return true;
2425 }
2426 
2427 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2428   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2429 }
2430 
2431 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2432   return _eden.length() * HeapRegion::GrainBytes;
2433 }
2434 
2435 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2436 // must be equal to the humongous object limit.
2437 size_t G1CollectedHeap::max_tlab_size() const {
2438   return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2439 }
2440 
2441 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2442   AllocationContext_t context = AllocationContext::current();
2443   return _allocator->unsafe_max_tlab_alloc(context);
2444 }
2445 
2446 size_t G1CollectedHeap::max_capacity() const {
2447   return _hrm.reserved().byte_size();
2448 }
2449 
2450 jlong G1CollectedHeap::millis_since_last_gc() {
2451   // See the notes in GenCollectedHeap::millis_since_last_gc()
2452   // for more information about the implementation.
2453   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2454     _g1_policy->collection_pause_end_millis();
2455   if (ret_val < 0) {
2456     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2457       ". returning zero instead.", ret_val);
2458     return 0;

new/src/share/vm/gc/g1/g1CollectedHeap.cpp

 355 
 356   _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
 357 
 358   assert(words_not_fillable == 0 ||
 359          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 360          "Miscalculation in humongous allocation");
 361 
 362   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 363 
 364   for (uint i = first; i <= last; ++i) {
 365     hr = region_at(i);
 366     _humongous_set.add(hr);
 367     _hr_printer.alloc(hr);
 368   }
 369 
 370   return new_obj;
 371 }
 372 
 373 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 374   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 375   return align_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 376 }
 377 
 378 // If could fit into free regions w/o expansion, try.
 379 // Otherwise, if can expand, do so.
 380 // Otherwise, if using ex regions might help, try with ex given back.
 381 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 382   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 383 
 384   _verifier->verify_region_sets_optional();
 385 
 386   uint first = G1_NO_HRM_INDEX;
 387   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 388 
 389   if (obj_regions == 1) {
 390     // Only one region to allocate, try to use a fast path by directly allocating
 391     // from the free lists. Do not try to expand here, we will potentially do that
 392     // later.
 393     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 394     if (hr != NULL) {
 395       first = hr->hrm_index();
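
The hunk above is where the patch touches humongous sizing: humongous_obj_size_in_regions now calls align_up_ (formerly align_size_up_) to round word_size up to a multiple of HeapRegion::GrainWords before dividing, so a request even one word past a region boundary claims a whole extra region. The following standalone C++ sketch is not HotSpot code; it assumes the usual power-of-two bitmask semantics for align_up_, and the region size used here is illustrative only.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for HotSpot's align_up_ (power-of-two alignment assumed).
static size_t align_up_sketch(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t grain_words = (size_t)1 << 17;   // illustrative region size: 128K words (1 MB with 8-byte words)
  size_t word_size = grain_words + 1;           // one word past a single region
  size_t regions = align_up_sketch(word_size, grain_words) / grain_words;
  printf("regions needed: %zu\n", regions);     // prints 2
  return 0;
}
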


1588   assert_at_safepoint(true /* should_be_vm_thread */);
1589 
1590   _verifier->verify_region_sets_optional();
1591 
1592   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1593   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1594                             word_size * HeapWordSize);
1595 
1596 
1597   if (expand(expand_bytes, _workers)) {
1598     _hrm.verify_optional();
1599     _verifier->verify_region_sets_optional();
1600     return attempt_allocation_at_safepoint(word_size,
1601                                            context,
1602                                            false /* expect_null_mutator_alloc_region */);
1603   }
1604   return NULL;
1605 }
1606 
1607 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1608   size_t aligned_expand_bytes = ReservedSpace::page_align_up(expand_bytes);
1609   aligned_expand_bytes = align_up(aligned_expand_bytes,
1610                                        HeapRegion::GrainBytes);
1611 
1612   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1613                             expand_bytes, aligned_expand_bytes);
1614 
1615   if (is_maximal_no_gc()) {
1616     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1617     return false;
1618   }
1619 
1620   double expand_heap_start_time_sec = os::elapsedTime();
1621   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1622   assert(regions_to_expand > 0, "Must expand by at least one region");
1623 
1624   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1625   if (expand_time_ms != NULL) {
1626     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1627   }
1628 
1629   if (expanded_by > 0) {
1630     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1631     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1632     g1_policy()->record_new_heap_size(num_regions());
1633   } else {
1634     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1635 
1636     // The expansion of the virtual storage space was unsuccessful.
1637     // Let's see if it was because we ran out of swap.
1638     if (G1ExitOnExpansionFailure &&
1639         _hrm.available() >= regions_to_expand) {
1640       // We had head room...
1641       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1642     }
1643   }
1644   return regions_to_expand > 0;
1645 }
1646 
1647 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1648   size_t aligned_shrink_bytes =
1649     ReservedSpace::page_align_down(shrink_bytes);
1650   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1651                                          HeapRegion::GrainBytes);
1652   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1653 
1654   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1655   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1656 
1657 
1658   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1659                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1660   if (num_regions_removed > 0) {
1661     g1_policy()->record_new_heap_size(num_regions());
1662   } else {
1663     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1664   }
1665 }
1666 
1667 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1668   _verifier->verify_region_sets_optional();
1669 
1670   // We should only reach here at the end of a Full GC which means we
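
In the hunk above, the patch renames the sizing helpers on the expand and shrink paths: expand() rounds the request up via page_align_up (formerly page_align_size_up) and align_up so at least the requested amount is committed in whole regions, while shrink_helper() rounds down via page_align_down and align_down so only whole regions are uncommitted. Below is a minimal standalone sketch of that sizing arithmetic, assuming power-of-two round-up/round-down semantics for the renamed helpers; the page and region sizes are illustrative and not taken from the patch.

#include <cstddef>
#include <cstdio>

// Assumed semantics of the renamed helpers: round to a power-of-two boundary.
static size_t round_up(size_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
static size_t round_down(size_t v, size_t a) { return v & ~(a - 1); }

int main() {
  const size_t page_bytes  = 4 * 1024;          // illustrative OS page size
  const size_t grain_bytes = 1024 * 1024;       // illustrative HeapRegion::GrainBytes

  // Expansion: round up, so a 300 KB request still commits one whole region.
  size_t expand_request = 300 * 1024;
  size_t expand_bytes   = round_up(round_up(expand_request, page_bytes), grain_bytes);
  printf("regions to expand: %zu\n", expand_bytes / grain_bytes);   // prints 1

  // Shrinking: round down, so a 1.5 MB request uncommits only one whole region.
  size_t shrink_request = 1536 * 1024;
  size_t shrink_bytes   = round_down(round_down(shrink_request, page_bytes), grain_bytes);
  printf("regions to remove: %zu\n", shrink_bytes / grain_bytes);   // prints 1
  return 0;
}
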


2418 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2419   HeapRegion* hr = heap_region_containing(addr);
2420   return hr->block_is_obj(addr);
2421 }
2422 
2423 bool G1CollectedHeap::supports_tlab_allocation() const {
2424   return true;
2425 }
2426 
2427 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2428   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2429 }
2430 
2431 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2432   return _eden.length() * HeapRegion::GrainBytes;
2433 }
2434 
2435 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2436 // must be equal to the humongous object limit.
2437 size_t G1CollectedHeap::max_tlab_size() const {
2438   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2439 }
2440 
2441 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2442   AllocationContext_t context = AllocationContext::current();
2443   return _allocator->unsafe_max_tlab_alloc(context);
2444 }
2445 
2446 size_t G1CollectedHeap::max_capacity() const {
2447   return _hrm.reserved().byte_size();
2448 }
2449 
2450 jlong G1CollectedHeap::millis_since_last_gc() {
2451   // See the notes in GenCollectedHeap::millis_since_last_gc()
2452   // for more information about the implementation.
2453   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2454     _g1_policy->collection_pause_end_millis();
2455   if (ret_val < 0) {
2456     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2457       ". returning zero instead.", ret_val);
2458     return 0;
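
The last hunk renames align_size_down to align_down in max_tlab_size(), which caps TLABs at the humongous object threshold rounded down to the minimum object alignment, so that a TLAB never reaches humongous size. A standalone sketch of that cap follows, assuming the threshold is half a region (G1's usual humongous limit) and a hypothetical alignment value; none of these constants come from the patch.

#include <cstddef>
#include <cstdio>

static size_t round_down(size_t v, size_t a) { return v & ~(a - 1); }

int main() {
  const size_t grain_words         = (size_t)1 << 17; // illustrative region size in words
  const size_t humongous_threshold = grain_words / 2; // assumed: objects >= half a region are humongous
  const size_t min_obj_alignment   = 8;               // hypothetical MinObjAlignment, in words

  size_t max_tlab_words = round_down(humongous_threshold, min_obj_alignment);
  printf("max TLAB size: %zu words\n", max_tlab_words);
  return 0;
}

millis_since_last_gc(), also shown above, guards against skew between os::javaTimeNanos() and the recorded pause end time by logging a warning and returning zero instead of a negative value.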

