
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 9280 : imported patch 8140585-PLAB-statistics-flushed-too-late
rev 9282 : dihop-changes


 410       // not last one
 411       assert(new_top > hr->end(), "new_top should be above this region");
 412       hr->set_top(hr->end());
 413       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 414     }
 415   }
 416   // If we have continues humongous regions (hr != NULL), then the
 417   // end of the last one should match new_end and its top should
 418   // match new_top.
 419   assert(hr == NULL ||
 420          (hr->end() == new_end && hr->top() == new_top), "sanity");
 421   check_bitmaps("Humongous Region Allocation", first_hr);
 422 
 423   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 424   increase_used(first_hr->used());
 425   _humongous_set.add(first_hr);
 426 
 427   return new_obj;
 428 }
 429 
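The hunk above is the tail of the helper that lays a humongous object over a run of contiguous regions: every region except the last gets its top pushed to its end, while the last one keeps top == new_top. A standalone sketch of that arithmetic (illustration only, not HotSpot code; the 1 MB region size, i.e. 131072 words with 8-byte HeapWords, is an assumed value):

  #include <cstdio>
  #include <cstddef>

  // Illustration only. kGrainWords stands in for HeapRegion::GrainWords; the value
  // below assumes 1 MB regions and 8-byte words and is not taken from the source.
  static const size_t kGrainWords = 131072;

  int main() {
    size_t word_size = 300000;  // a hypothetical humongous request, in words

    // Number of regions the object spans (ceiling division).
    size_t obj_regions = (word_size + kGrainWords - 1) / kGrainWords;

    // Every region but the last is filled to its end; 'new_top' ends up this many
    // words above the bottom of the last region.
    size_t words_in_last = word_size - (obj_regions - 1) * kGrainWords;

    printf("regions spanned: %zu, words used in last region: %zu\n",
           obj_regions, words_in_last);
    return 0;
  }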





 430 // If could fit into free regions w/o expansion, try.
 431 // Otherwise, if can expand, do so.
 432 // Otherwise, if using ex regions might help, try with ex given back.
 433 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 434   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 435 
 436   verify_region_sets_optional();
 437 
 438   uint first = G1_NO_HRM_INDEX;
 439   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 440 
 441   if (obj_regions == 1) {
 442     // Only one region to allocate, try to use a fast path by directly allocating
 443     // from the free lists. Do not try to expand here, we will potentially do that
 444     // later.
 445     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 446     if (hr != NULL) {
 447       first = hr->hrm_index();
 448     }
 449   } else {
 450     // We can't allocate humongous regions spanning more than one region while
 451     // cleanupComplete() is running, since some of the regions we find to be
 452     // empty might not yet be added to the free list. It is not straightforward
 453     // to know which list they are on so that we can remove them. We only
 454     // need to do this if we need to allocate more than one region to satisfy the
 455     // current humongous allocation request. If we are only allocating one region
 456     // we use the one-region allocation code (see above), which already
 457     // potentially waits for regions from the secondary free list.
 458     wait_while_free_regions_coming();
 459     append_secondary_free_list_if_not_empty_with_lock();


1019     collect(GCCause::_g1_humongous_allocation);
1020   }
1021 
1022   // We will loop until a) we manage to successfully perform the
1023   // allocation or b) we successfully schedule a collection which
1024   // fails to perform the allocation. b) is the only case when we'll
1025   // return NULL.
1026   HeapWord* result = NULL;
1027   for (int try_count = 1; /* we'll return */; try_count += 1) {
1028     bool should_try_gc;
1029     uint gc_count_before;
1030 
1031     {
1032       MutexLockerEx x(Heap_lock);
1033 
1034       // Given that humongous objects are not allocated in young
1035       // regions, we'll first try to do the allocation without doing a
1036       // collection hoping that there's enough space in the heap.
1037       result = humongous_obj_allocate(word_size, AllocationContext::current());
1038       if (result != NULL) {

1039         return result;
1040       }
1041 
1042       if (GC_locker::is_active_and_needs_gc()) {
1043         should_try_gc = false;
1044       } else {
1045          // The GCLocker may not be active but the GCLocker initiated
1046         // GC may not yet have been performed (GCLocker::needs_gc()
1047         // returns true). In this case we do not try this GC and
1048         // wait until the GCLocker initiated GC is performed, and
1049         // then retry the allocation.
1050         if (GC_locker::needs_gc()) {
1051           should_try_gc = false;
1052         } else {
1053           // Read the GC count while still holding the Heap_lock.
1054           gc_count_before = total_collections();
1055           should_try_gc = true;
1056         }
1057       }
1058     }
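The loop above retries the humongous allocation under the Heap_lock and only schedules a collection when no GCLocker-initiated GC is pending; gc_count_before is sampled under the same lock so the subsequent collection request can detect whether some other GC already ran. A sketch of just that decision (the names below are illustrative, not the HotSpot API):

  // Illustration only: the choice made while holding the Heap_lock in the loop above.
  // The two booleans stand in for GC_locker::is_active_and_needs_gc() and
  // GC_locker::needs_gc(); 'current_total_collections' stands in for total_collections().
  struct GcDecision {
    bool should_try_gc;
    unsigned gc_count_before;  // only meaningful when should_try_gc is true
  };

  GcDecision decide(bool locker_active_and_needs_gc,
                    bool locker_needs_gc,
                    unsigned current_total_collections) {
    GcDecision d = {false, 0};
    if (locker_active_and_needs_gc || locker_needs_gc) {
      // A GCLocker-initiated GC is still pending: do not schedule our own GC,
      // just retry the allocation once that GC has run.
      d.should_try_gc = false;
    } else {
      // Sample the collection count while still holding the lock so the caller
      // can later tell whether an intervening GC already happened.
      d.gc_count_before = current_total_collections;
      d.should_try_gc = true;
    }
    return d;
  }

  int main() { return decide(false, false, 42).should_try_gc ? 0 : 1; }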


5266 
5267   _survivor_evac_stats.adjust_desired_plab_sz();
5268   _old_evac_stats.adjust_desired_plab_sz();
5269 
5270   // Reset and re-enable the hot card cache.
5271   // Note the counts for the cards in the regions in the
5272   // collection set are reset when the collection set is freed.
5273   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5274   hot_card_cache->reset_hot_cache();
5275   hot_card_cache->set_use_cache(true);
5276 
5277   purge_code_root_memory();
5278 
5279   redirty_logged_cards();
5280 #if defined(COMPILER2) || INCLUDE_JVMCI
5281   DerivedPointerTable::update_pointers();
5282 #endif
5283 }
5284 
5285 void G1CollectedHeap::record_obj_copy_mem_stats() {


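Per the header, rev 9280 is about flushing PLAB statistics earlier; the two adjust_desired_plab_sz() calls above then resize the survivor and old PLABs from what the just-finished evacuation allocated. The real formula lives in PLABStats; the following is only a schematic of the feedback idea, with made-up names and an arbitrary smoothing factor:

  #include <cstdio>
  #include <cstddef>

  // Schematic only, not the real PLABStats logic: nudge the desired PLAB size
  // toward the average amount each PLAB actually had to hold last GC.
  struct PlabStatsSketch {
    size_t desired_words = 1024;  // assumed starting value

    void adjust(size_t allocated_words, size_t refills, double smoothing = 0.7) {
      if (refills == 0) return;                     // nothing sampled this GC
      size_t observed = allocated_words / refills;  // average per-PLAB allocation
      desired_words = (size_t)(smoothing * desired_words +
                               (1.0 - smoothing) * observed);
    }
  };

  int main() {
    PlabStatsSketch survivor;
    survivor.adjust(/* allocated_words */ 80000, /* refills */ 50);
    printf("new desired PLAB size: %zu words\n", survivor.desired_words);
    return 0;
  }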
5286   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5287                                                create_g1_evac_summary(&_old_evac_stats));
5288 }
5289 
5290 void G1CollectedHeap::free_region(HeapRegion* hr,
5291                                   FreeRegionList* free_list,
5292                                   bool par,
5293                                   bool locked) {
5294   assert(!hr->is_free(), "the region should not be free");
5295   assert(!hr->is_empty(), "the region should not be empty");
5296   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5297   assert(free_list != NULL, "pre-condition");
5298 
5299   if (G1VerifyBitmaps) {
5300     MemRegion mr(hr->bottom(), hr->end());
5301     concurrent_mark()->clearRangePrevBitmap(mr);
5302   }
5303 
5304   // Clear the card counts for this region.
5305   // Note: we only need to do this if the region is not young




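free_region() above asserts that the region is committed, non-free and non-empty before it is handed to a free list, and with G1VerifyBitmaps it wipes the prev marking bitmap over the region's whole [bottom, end) range so later verification does not trip over stale marks. A tiny standalone illustration of clearing such a range (std::vector<bool> stands in for the real mark bitmap; the granularity is made up):

  #include <vector>
  #include <cstddef>
  #include <cassert>

  // Illustration only: clear the bits covering one region in a flat bitmap.
  void clear_range(std::vector<bool>& bitmap, size_t region_index, size_t bits_per_region) {
    size_t bottom = region_index * bits_per_region;  // first bit of the region
    size_t end    = bottom + bits_per_region;        // one past its last bit
    for (size_t i = bottom; i < end; ++i) {
      bitmap[i] = false;
    }
  }

  int main() {
    std::vector<bool> prev_bitmap(4 * 8, true);  // four tiny "regions" of 8 bits each
    clear_range(prev_bitmap, 2, 8);              // freeing region 2: wipe its marks
    assert(!prev_bitmap[16] && !prev_bitmap[23] && prev_bitmap[24]);
    return 0;
  }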
 410       // not last one
 411       assert(new_top > hr->end(), "new_top should be above this region");
 412       hr->set_top(hr->end());
 413       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 414     }
 415   }
 416   // If we have continues humongous regions (hr != NULL), then the
 417   // end of the last one should match new_end and its top should
 418   // match new_top.
 419   assert(hr == NULL ||
 420          (hr->end() == new_end && hr->top() == new_top), "sanity");
 421   check_bitmaps("Humongous Region Allocation", first_hr);
 422 
 423   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 424   increase_used(first_hr->used());
 425   _humongous_set.add(first_hr);
 426 
 427   return new_obj;
 428 }
 429 
 430 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 431   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 432   return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 433 }
 434 
 435 // If could fit into free regions w/o expansion, try.
 436 // Otherwise, if can expand, do so.
 437 // Otherwise, if using ex regions might help, try with ex given back.
 438 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 439   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 440 
 441   verify_region_sets_optional();
 442 
 443   uint first = G1_NO_HRM_INDEX;
 444   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 445 
 446   if (obj_regions == 1) {
 447     // Only one region to allocate, try to use a fast path by directly allocating
 448     // from the free lists. Do not try to expand here, we will potentially do that
 449     // later.
 450     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 451     if (hr != NULL) {
 452       first = hr->hrm_index();
 453     }
 454   } else {
 455     // We can't allocate humongous regions spanning more than one region while
 456     // cleanupComplete() is running, since some of the regions we find to be
 457     // empty might not yet be added to the free list. It is not straightforward
 458     // to know which list they are on so that we can remove them. We only
 459     // need to do this if we need to allocate more than one region to satisfy the
 460     // current humongous allocation request. If we are only allocating one region
 461     // we use the one-region allocation code (see above), which already
 462     // potentially waits for regions from the secondary free list.
 463     wait_while_free_regions_coming();
 464     append_secondary_free_list_if_not_empty_with_lock();


1024     collect(GCCause::_g1_humongous_allocation);
1025   }
1026 
1027   // We will loop until a) we manage to successfully perform the
1028   // allocation or b) we successfully schedule a collection which
1029   // fails to perform the allocation. b) is the only case when we'll
1030   // return NULL.
1031   HeapWord* result = NULL;
1032   for (int try_count = 1; /* we'll return */; try_count += 1) {
1033     bool should_try_gc;
1034     uint gc_count_before;
1035 
1036     {
1037       MutexLockerEx x(Heap_lock);
1038 
1039       // Given that humongous objects are not allocated in young
1040       // regions, we'll first try to do the allocation without doing a
1041       // collection hoping that there's enough space in the heap.
1042       result = humongous_obj_allocate(word_size, AllocationContext::current());
1043       if (result != NULL) {
1044         g1_policy()->add_last_old_allocated_bytes(humongous_obj_size_in_regions(word_size) * HeapRegion::GrainBytes);
1045         return result;
1046       }
1047 
1048       if (GC_locker::is_active_and_needs_gc()) {
1049         should_try_gc = false;
1050       } else {
1051          // The GCLocker may not be active but the GCLocker initiated
1052         // GC may not yet have been performed (GCLocker::needs_gc()
1053         // returns true). In this case we do not try this GC and
1054         // wait until the GCLocker initiated GC is performed, and
1055         // then retry the allocation.
1056         if (GC_locker::needs_gc()) {
1057           should_try_gc = false;
1058         } else {
1059           // Read the GC count while still holding the Heap_lock.
1060           gc_count_before = total_collections();
1061           should_try_gc = true;
1062         }
1063       }
1064     }
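Compared with the left-hand side, line 1044 above is new: a successful humongous allocation is reported to the policy as whole regions, humongous_obj_size_in_regions(word_size) * HeapRegion::GrainBytes, rather than the object's own footprint. A worked example of that charge, under the same assumed 1 MB region size and 8-byte HeapWords:

  #include <cstdio>
  #include <cstddef>

  int main() {
    // Assumed values for illustration only (64-bit VM, 1 MB regions).
    const size_t heap_word_size = 8;            // bytes per HeapWord
    const size_t grain_bytes    = 1024 * 1024;  // stands in for HeapRegion::GrainBytes
    const size_t grain_words    = grain_bytes / heap_word_size;

    size_t word_size   = 300000;                // a hypothetical humongous request
    size_t obj_regions = (word_size + grain_words - 1) / grain_words;  // 3 regions

    size_t object_bytes  = word_size * heap_word_size;  // 2,400,000 bytes of object
    size_t charged_bytes = obj_regions * grain_bytes;   // 3,145,728 bytes charged

    printf("object: %zu bytes, charged to the policy: %zu bytes\n",
           object_bytes, charged_bytes);
    return 0;
  }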


5272 
5273   _survivor_evac_stats.adjust_desired_plab_sz();
5274   _old_evac_stats.adjust_desired_plab_sz();
5275 
5276   // Reset and re-enable the hot card cache.
5277   // Note the counts for the cards in the regions in the
5278   // collection set are reset when the collection set is freed.
5279   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5280   hot_card_cache->reset_hot_cache();
5281   hot_card_cache->set_use_cache(true);
5282 
5283   purge_code_root_memory();
5284 
5285   redirty_logged_cards();
5286 #if defined(COMPILER2) || INCLUDE_JVMCI
5287   DerivedPointerTable::update_pointers();
5288 #endif
5289 }
5290 
5291 void G1CollectedHeap::record_obj_copy_mem_stats() {
5292   g1_policy()->add_last_old_allocated_bytes(_old_evac_stats.allocated() * HeapWordSize);
5293 
5294   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5295                                                create_g1_evac_summary(&_old_evac_stats));
5296 }
5297 
5298 void G1CollectedHeap::free_region(HeapRegion* hr,
5299                                   FreeRegionList* free_list,
5300                                   bool par,
5301                                   bool locked) {
5302   assert(!hr->is_free(), "the region should not be free");
5303   assert(!hr->is_empty(), "the region should not be empty");
5304   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5305   assert(free_list != NULL, "pre-condition");
5306 
5307   if (G1VerifyBitmaps) {
5308     MemRegion mr(hr->bottom(), hr->end());
5309     concurrent_mark()->clearRangePrevBitmap(mr);
5310   }
5311 
5312   // Clear the card counts for this region.
5313   // Note: we only need to do this if the region is not young

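The other new call, at line 5292 in record_obj_copy_mem_stats(), converts the old-generation PLAB allocation from HeapWords to bytes before reporting it. Together with the humongous charge above, the dihop-changes revision thus hands the policy the bytes newly allocated in old regions (presumably per GC cycle; the reset point is not visible in this diff). A small sketch of the combined bookkeeping; the tracker type and its reset semantics are invented for illustration:

  #include <cstdio>
  #include <cstddef>

  // Illustration only: an accumulator in the spirit of add_last_old_allocated_bytes().
  struct OldAllocTracker {
    size_t bytes_since_last_gc = 0;

    void add_humongous(size_t obj_regions, size_t grain_bytes) {
      bytes_since_last_gc += obj_regions * grain_bytes;         // whole regions charged
    }
    void add_old_evac(size_t allocated_words, size_t heap_word_size) {
      bytes_since_last_gc += allocated_words * heap_word_size;  // words to bytes
    }
  };

  int main() {
    OldAllocTracker tracker;
    tracker.add_humongous(3, 1024 * 1024);  // e.g. the three-region object above
    tracker.add_old_evac(500000, 8);        // e.g. _old_evac_stats.allocated()
    printf("old-gen bytes allocated since the last GC: %zu\n", tracker.bytes_since_last_gc);
    return 0;
  }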
