
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Old version:

 854 
 855   // We will loop until a) we manage to successfully perform the
 856   // allocation or b) we successfully schedule a collection which
 857   // fails to perform the allocation. b) is the only case when we'll
 858   // return NULL.
 859   HeapWord* result = NULL;
 860   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 861     bool should_try_gc;
 862     uint gc_count_before;
 863 
 864 
 865     {
 866       MutexLocker x(Heap_lock);
 867 
 868       // Given that humongous objects are not allocated in young
 869       // regions, we'll first try to do the allocation without doing a
 870       // collection, hoping that there's enough space in the heap.
 871       result = humongous_obj_allocate(word_size);
 872       if (result != NULL) {
 873         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
 874         policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
 875         return result;
 876       }
 877 
 878       // Only try a GC if the GCLocker does not signal the need for a GC; otherwise
 879       // wait until the GCLocker initiated GC has been performed, then retry. This
 880       // includes the case when the GCLocker is no longer active but its GC is still pending.
 881       should_try_gc = !GCLocker::needs_gc();
 882       // Read the GC count while still holding the Heap_lock.
 883       gc_count_before = total_collections();
 884     }
 885 
 886     if (should_try_gc) {
 887       bool succeeded;
 888       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 889                                    GCCause::_g1_humongous_allocation);
 890       if (result != NULL) {
 891         assert(succeeded, "only way to get back a non-NULL result");
 892         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
 893                              Thread::current()->name(), p2i(result));
 894         return result;


4068   redirty_logged_cards(rdcqs);
4069 
4070   free_collection_set(&_collection_set, evacuation_info, per_thread_states->surviving_young_words());
4071 
4072   eagerly_reclaim_humongous_regions();
4073 
4074   record_obj_copy_mem_stats();
4075 
4076   evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
4077   evacuation_info.set_bytes_used(_bytes_used_during_gc);
4078 
4079 #if COMPILER2_OR_JVMCI
4080   double start = os::elapsedTime();
4081   DerivedPointerTable::update_pointers();
4082   phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4083 #endif
4084   policy()->print_age_table();
4085 }
4086 
4087 void G1CollectedHeap::record_obj_copy_mem_stats() {
4088   policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4089 
4090   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4091                                                create_g1_evac_summary(&_old_evac_stats));
4092 }
4093 
4094 void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
4095   assert(!hr->is_free(), "the region should not be free");
4096   assert(!hr->is_empty(), "the region should not be empty");
4097   assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
4098 
4099   if (G1VerifyBitmaps) {
4100     MemRegion mr(hr->bottom(), hr->end());
4101     concurrent_mark()->clear_range_in_prev_bitmap(mr);
4102   }
4103 
4104   // Clear the card counts for this region.
4105   // Note: we only need to do this if the region is not young
4106   // (since we don't refine cards in young regions).
4107   if (!hr->is_young()) {
4108     _hot_card_cache->reset_card_counts(hr);


4169 
4170     void merge_stats(FreeCSetStats* other) {
4171       assert(other != NULL, "invariant");
4172       _before_used_bytes += other->_before_used_bytes;
4173       _after_used_bytes += other->_after_used_bytes;
4174       _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
4175       _failure_used_words += other->_failure_used_words;
4176       _failure_waste_words += other->_failure_waste_words;
4177       _rs_length += other->_rs_length;
4178       _regions_freed += other->_regions_freed;
4179     }
4180 
4181     void report(G1CollectedHeap* g1h, G1EvacuationInfo* evacuation_info) {
4182       evacuation_info->set_regions_freed(_regions_freed);
4183       evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4184 
4185       g1h->decrement_summary_bytes(_before_used_bytes);
4186       g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4187 
4188       G1Policy *policy = g1h->policy();
4189       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4190       policy->record_rs_length(_rs_length);
4191       policy->cset_regions_freed();
4192     }
4193 
4194     void account_failed_region(HeapRegion* r) {
4195       size_t used_words = r->marked_bytes() / HeapWordSize;
4196       _failure_used_words += used_words;
4197       _failure_waste_words += HeapRegion::GrainWords - used_words;
4198       _after_used_bytes += r->used();
4199 
4200       // When moving a young gen region to old gen, we "allocate" that whole
4201       // region there. This is in addition to any already evacuated objects.
4202       // Notify the policy about that. Old gen regions do not cause an
4203       // additional allocation: both the objects still in the region and the
4204       // ones already moved are accounted for elsewhere.
4205       if (r->is_young()) {
4206         _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4207       }
4208     }
4209 


New version:

 854 
 855   // We will loop until a) we manage to successfully perform the
 856   // allocation or b) we successfully schedule a collection which
 857   // fails to perform the allocation. b) is the only case when we'll
 858   // return NULL.
 859   HeapWord* result = NULL;
 860   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 861     bool should_try_gc;
 862     uint gc_count_before;
 863 
 864 
 865     {
 866       MutexLocker x(Heap_lock);
 867 
 868       // Given that humongous objects are not allocated in young
 869       // regions, we'll first try to do the allocation without doing a
 870       // collection, hoping that there's enough space in the heap.
 871       result = humongous_obj_allocate(word_size);
 872       if (result != NULL) {
 873         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
 874         policy()->old_gen_alloc_tracker()->
 875           add_allocated_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
 876         return result;
 877       }
 878 
 879       // Only try a GC if the GCLocker does not signal the need for a GC; otherwise
 880       // wait until the GCLocker initiated GC has been performed, then retry. This
 881       // includes the case when the GCLocker is no longer active but its GC is still pending.
 882       should_try_gc = !GCLocker::needs_gc();
 883       // Read the GC count while still holding the Heap_lock.
 884       gc_count_before = total_collections();
 885     }
 886 
 887     if (should_try_gc) {
 888       bool succeeded;
 889       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 890                                    GCCause::_g1_humongous_allocation);
 891       if (result != NULL) {
 892         assert(succeeded, "only way to get back a non-NULL result");
 893         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
 894                              Thread::current()->name(), p2i(result));
 895         return result;


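The hunk above implements an "allocate, else schedule a pause" retry protocol: the allocation attempt and the read of the collection count both happen under Heap_lock, so a pause requested with a stale count is rejected and the loop retries. Below is a minimal standalone sketch of that protocol; every name in it (try_alloc, run_collection_pause, collection_count) is a hypothetical stand-in for the HotSpot machinery, not the real API.

#include <cstddef>
#include <mutex>

static std::mutex heap_lock;               // plays the role of Heap_lock
static unsigned   collection_count = 0;    // plays the role of total_collections()

// Stubs so the sketch is self-contained: the "heap" is always full, and a
// requested collection always runs but frees nothing.
static void* try_alloc(std::size_t /*word_size*/) { return nullptr; }
static bool  run_collection_pause(std::size_t /*word_size*/, unsigned /*count_before*/,
                                  void** result) { *result = nullptr; return true; }

void* allocate_humongous(std::size_t word_size) {
  // Loop until either the allocation succeeds, or a collection is successfully
  // scheduled and still cannot satisfy it; the latter is the only case that
  // returns nullptr, matching the comment in the code above.
  for (;;) {
    unsigned count_before;
    {
      std::lock_guard<std::mutex> x(heap_lock);
      if (void* result = try_alloc(word_size)) {
        return result;                     // fast path: no GC needed
      }
      // Read the collection count while still holding the lock, so a GC that
      // races with us shows up as a stale count in run_collection_pause().
      count_before = collection_count;
    }
    void* result = nullptr;
    bool scheduled = run_collection_pause(word_size, count_before, &result);
    if (result != nullptr) {
      return result;                       // the pause made room for us
    }
    if (scheduled) {
      return nullptr;                      // GC ran and still no room: give up
    }
    // Stale count: another GC ran first; retry the allocation from the top.
  }
}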
4069   redirty_logged_cards(rdcqs);
4070 
4071   free_collection_set(&_collection_set, evacuation_info, per_thread_states->surviving_young_words());
4072 
4073   eagerly_reclaim_humongous_regions();
4074 
4075   record_obj_copy_mem_stats();
4076 
4077   evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
4078   evacuation_info.set_bytes_used(_bytes_used_during_gc);
4079 
4080 #if COMPILER2_OR_JVMCI
4081   double start = os::elapsedTime();
4082   DerivedPointerTable::update_pointers();
4083   phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4084 #endif
4085   policy()->print_age_table();
4086 }
4087 
4088 void G1CollectedHeap::record_obj_copy_mem_stats() {
4089   policy()->old_gen_alloc_tracker()->
4090     add_allocated_bytes_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4091 
4092   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4093                                                create_g1_evac_summary(&_old_evac_stats));
4094 }
4095 
4096 void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
4097   assert(!hr->is_free(), "the region should not be free");
4098   assert(!hr->is_empty(), "the region should not be empty");
4099   assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
4100 
4101   if (G1VerifyBitmaps) {
4102     MemRegion mr(hr->bottom(), hr->end());
4103     concurrent_mark()->clear_range_in_prev_bitmap(mr);
4104   }
4105 
4106   // Clear the card counts for this region.
4107   // Note: we only need to do this if the region is not young
4108   // (since we don't refine cards in young regions).
4109   if (!hr->is_young()) {
4110     _hot_card_cache->reset_card_counts(hr);

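The common thread in the hunks above is that old-gen allocation byte counts are now routed through policy()->old_gen_alloc_tracker() instead of a bare add_bytes_allocated_in_old_since_last_gc() call on the policy. A rough sketch of what such a tracker can look like; the fields and the reset_after_gc() hook are illustrative assumptions, not the actual G1OldGenAllocationTracker definition:

#include <cstddef>

// Illustrative sketch: accumulates bytes allocated in the old generation
// between two garbage collections.
class OldGenAllocationTracker {
  std::size_t _allocated_bytes_since_last_gc = 0;
  std::size_t _last_period_allocated_bytes   = 0;

public:
  // Called from allocation paths, e.g. after a humongous allocation or when
  // recording evacuation statistics, as at the call sites above.
  void add_allocated_bytes_since_last_gc(std::size_t bytes) {
    _allocated_bytes_since_last_gc += bytes;
  }

  // Called once per GC: snapshot the finished period and reset the counter.
  void reset_after_gc() {
    _last_period_allocated_bytes   = _allocated_bytes_since_last_gc;
    _allocated_bytes_since_last_gc = 0;
  }

  std::size_t last_period_allocated_bytes() const { return _last_period_allocated_bytes; }
};

int main() {
  OldGenAllocationTracker tracker;
  tracker.add_allocated_bytes_since_last_gc(4u * 1024 * 1024); // e.g. a 4 MB humongous object
  tracker.reset_after_gc();
  return tracker.last_period_allocated_bytes() == 4u * 1024 * 1024 ? 0 : 1;
}

Funneling the counts through one object keeps the old-gen allocation accounting in a single place instead of scattering it across the policy.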

4171 
4172     void merge_stats(FreeCSetStats* other) {
4173       assert(other != NULL, "invariant");
4174       _before_used_bytes += other->_before_used_bytes;
4175       _after_used_bytes += other->_after_used_bytes;
4176       _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
4177       _failure_used_words += other->_failure_used_words;
4178       _failure_waste_words += other->_failure_waste_words;
4179       _rs_length += other->_rs_length;
4180       _regions_freed += other->_regions_freed;
4181     }
4182 
4183     void report(G1CollectedHeap* g1h, G1EvacuationInfo* evacuation_info) {
4184       evacuation_info->set_regions_freed(_regions_freed);
4185       evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4186 
4187       g1h->decrement_summary_bytes(_before_used_bytes);
4188       g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4189 
4190       G1Policy *policy = g1h->policy();
4191       policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4192       policy->record_rs_length(_rs_length);
4193       policy->cset_regions_freed();
4194     }
4195 
4196     void account_failed_region(HeapRegion* r) {
4197       size_t used_words = r->marked_bytes() / HeapWordSize;
4198       _failure_used_words += used_words;
4199       _failure_waste_words += HeapRegion::GrainWords - used_words;
4200       _after_used_bytes += r->used();
4201 
4202       // When moving a young gen region to old gen, we "allocate" that whole
4203       // region there. This is in addition to any already evacuated objects.
4204       // Notify the policy about that. Old gen regions do not cause an
4205       // additional allocation: both the objects still in the region and the
4206       // ones already moved are accounted for elsewhere.
4207       if (r->is_young()) {
4208         _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4209       }
4210     }
4211 
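FreeCSetStats above is a per-worker accumulator: each worker counts on its own while freeing its share of the collection set, and the totals are combined serially in merge_stats() before a single report() call, so none of the counters need atomics. A stripped-down sketch of the same pattern, with the field set reduced and all names illustrative:

#include <cstddef>
#include <vector>

// Illustrative per-worker accumulator in the style of FreeCSetStats.
struct FreeStats {
  std::size_t before_used_bytes = 0;
  std::size_t regions_freed     = 0;

  // Serial merge, called by one thread after all workers have finished,
  // which is why plain += is safe here.
  void merge(const FreeStats& other) {
    before_used_bytes += other.before_used_bytes;
    regions_freed     += other.regions_freed;
  }
};

FreeStats merge_all(const std::vector<FreeStats>& per_worker) {
  FreeStats total;
  for (const FreeStats& s : per_worker) {
    total.merge(s);  // one merge per worker, no synchronization needed
  }
  return total;      // then handed over once, as report() does above
}

int main() {
  std::vector<FreeStats> per_worker(4);
  for (std::size_t i = 0; i < per_worker.size(); ++i) {
    per_worker[i].regions_freed = i + 1;   // pretend each worker freed some regions
  }
  return merge_all(per_worker).regions_freed == 10 ? 0 : 1; // 1+2+3+4
}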

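The arithmetic in account_failed_region() deserves a worked example: for a region that failed evacuation, the live (marked) part counts as used, the remainder of the region as waste, and a failed young region is additionally charged to old gen as one whole region because it is kept in place. The constants below are illustrative 64-bit values (8-byte heap words, 1 MB regions), not those of any particular build:

#include <cassert>
#include <cstddef>

int main() {
  // Illustrative constants: 8-byte heap words and 1 MB regions.
  const std::size_t heap_word_size = 8;
  const std::size_t grain_bytes    = 1024 * 1024;                  // region size in bytes
  const std::size_t grain_words    = grain_bytes / heap_word_size; // 131072 words

  // Suppose evacuation failed for a region with 512 KB of marked (live) data.
  std::size_t marked_bytes = 512 * 1024;
  std::size_t used_words   = marked_bytes / heap_word_size; // 65536 words stay in place
  std::size_t waste_words  = grain_words - used_words;      // 65536 words are wasted
  assert(used_words == 65536 && waste_words == 65536);

  // If the failed region was young, it is tenured in place and the policy is
  // charged a full region of old-gen allocation (+= GrainBytes above).
  std::size_t bytes_allocated_in_old = grain_bytes;
  assert(bytes_allocated_in_old == 1024 * 1024);
  return 0;
}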
