
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 9431 : dihop-changes
rev 9433 : imported patch erik-jmasa-review
rev 9435 : [mq]: mikael-erik-review


 999     collect(GCCause::_g1_humongous_allocation);
1000   }
1001 
1002   // We will loop until a) we manage to successfully perform the
1003   // allocation or b) we successfully schedule a collection which
1004   // then fails to perform the allocation. b) is the only case
1005   // when we'll return NULL.
1006   HeapWord* result = NULL;
1007   for (int try_count = 1; /* we'll return */; try_count += 1) {
1008     bool should_try_gc;
1009     uint gc_count_before;
1010 
1011     {
1012       MutexLockerEx x(Heap_lock);
1013 
1014       // Given that humongous objects are not allocated in young
1015       // regions, we'll first try to do the allocation without doing a
1016       // collection hoping that there's enough space in the heap.
1017       result = humongous_obj_allocate(word_size, AllocationContext::current());
1018       if (result != NULL) {
1019         g1_policy()->add_last_old_allocated_bytes(humongous_obj_size_in_regions(word_size) * HeapRegion::GrainBytes);

1020         return result;
1021       }
1022 
1023       if (GC_locker::is_active_and_needs_gc()) {
1024         should_try_gc = false;
1025       } else {
1026         // The GC_locker may not be active, but the GC_locker-initiated
1027         // GC may not yet have been performed (GC_locker::needs_gc()
1028         // returns true). In this case we do not try this GC and
1029         // instead wait until the GC_locker-initiated GC is performed,
1030         // and then retry the allocation.
1031         if (GC_locker::needs_gc()) {
1032           should_try_gc = false;
1033         } else {
1034           // Read the GC count while still holding the Heap_lock.
1035           gc_count_before = total_collections();
1036           should_try_gc = true;
1037         }
1038       }
1039     }
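
For context, the loop body above continues by either scheduling a collection pause keyed to the gc_count_before read under the Heap_lock, or stalling on the GC_locker. A minimal sketch of that continuation, assuming the do_collection_pause() and GC_locker::stall_until_clear() helpers visible elsewhere in these sources (the exact control flow here is a reconstruction for illustration, not part of this diff):

    // Outside the Heap_lock scope: either try a GC keyed to the count we
    // read under the lock, or wait for the pending GC_locker-initiated GC.
    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        return result;            // the pause performed the allocation
      }
      if (succeeded) {
        return NULL;              // GC ran but space is still short: case b)
      }
    } else {
      // A GC_locker-initiated GC is pending; stall, then retry the allocation.
      GC_locker::stall_until_clear();
    }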


5223 
5224   _survivor_evac_stats.adjust_desired_plab_sz();
5225   _old_evac_stats.adjust_desired_plab_sz();
5226 
5227   // Reset and re-enable the hot card cache.
5228   // Note the counts for the cards in the regions in the
5229   // collection set are reset when the collection set is freed.
5230   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5231   hot_card_cache->reset_hot_cache();
5232   hot_card_cache->set_use_cache(true);
5233 
5234   purge_code_root_memory();
5235 
5236   redirty_logged_cards();
5237 #if defined(COMPILER2) || INCLUDE_JVMCI
5238   DerivedPointerTable::update_pointers();
5239 #endif
5240 }
5241 
5242 void G1CollectedHeap::record_obj_copy_mem_stats() {
5243   g1_policy()->add_last_old_allocated_bytes(_old_evac_stats.allocated() * HeapWordSize);
5244 
5245   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5246                                                create_g1_evac_summary(&_old_evac_stats));
5247 }
5248 
5249 void G1CollectedHeap::free_region(HeapRegion* hr,
5250                                   FreeRegionList* free_list,
5251                                   bool par,
5252                                   bool locked) {
5253   assert(!hr->is_free(), "the region should not be free");
5254   assert(!hr->is_empty(), "the region should not be empty");
5255   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5256   assert(free_list != NULL, "pre-condition");
5257 
5258   if (G1VerifyBitmaps) {
5259     MemRegion mr(hr->bottom(), hr->end());
5260     concurrent_mark()->clearRangePrevBitmap(mr);
5261   }
5262 
5263   // Clear the card counts for this region.


5615 
5616     if (!cur->evacuation_failed()) {
5617       MemRegion used_mr = cur->used_region();
5618 
5619       // And the region is not empty.
5620       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5621       pre_used += cur->used();
5622       free_region(cur, &local_free_list, false /* par */, true /* locked */);
5623     } else {
5624       cur->uninstall_surv_rate_group();
5625       if (cur->is_young()) {
5626         cur->set_young_index_in_cset(-1);
5627       }
5628       cur->set_evacuation_failed(false);
5629       // When moving a young gen region to old gen, we "allocate" that whole region
5630       // there. This is in addition to any already evacuated objects. Notify the
5631       // policy about that.
5632       // Old gen regions do not cause an additional allocation: both the objects
5633       // still in the region and the ones already moved are accounted for elsewhere.
5634       if (cur->is_young()) {
5635         policy->add_last_old_allocated_bytes(HeapRegion::GrainBytes);
5636       }
5637       // The region is now considered to be old.
5638       cur->set_old();
5639       // Do some allocation statistics accounting. Regions that failed evacuation
5640       // are always made old, so there is no need to update anything in the young
5641       // gen statistics, but we need to update old gen statistics.
5642       size_t used_words = cur->marked_bytes() / HeapWordSize;
5643       _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
5644       _old_set.add(cur);
5645       evacuation_info.increment_collectionset_used_after(cur->used());
5646     }
5647     cur = next;
5648   }
5649 
5650   evacuation_info.set_regions_freed(local_free_list.length());
5651   policy->record_max_rs_lengths(rs_lengths);
5652   policy->cset_regions_freed();
5653 
5654   double end_sec = os::elapsedTime();
5655   double elapsed_ms = (end_sec - start_sec) * 1000.0;




 999     collect(GCCause::_g1_humongous_allocation);
1000   }
1001 
1002   // We will loop until a) we manage to successfully perform the
1003   // allocation or b) we successfully schedule a collection which
1004   // then fails to perform the allocation. b) is the only case
1005   // when we'll return NULL.
1006   HeapWord* result = NULL;
1007   for (int try_count = 1; /* we'll return */; try_count += 1) {
1008     bool should_try_gc;
1009     uint gc_count_before;
1010 
1011     {
1012       MutexLockerEx x(Heap_lock);
1013 
1014       // Given that humongous objects are not allocated in young
1015       // regions, we'll first try to do the allocation without doing a
1016       // collection hoping that there's enough space in the heap.
1017       result = humongous_obj_allocate(word_size, AllocationContext::current());
1018       if (result != NULL) {
1019         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
1020         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
1021         return result;
1022       }
1023 
1024       if (GC_locker::is_active_and_needs_gc()) {
1025         should_try_gc = false;
1026       } else {
1027         // The GC_locker may not be active, but the GC_locker-initiated
1028         // GC may not yet have been performed (GC_locker::needs_gc()
1029         // returns true). In this case we do not try this GC and
1030         // instead wait until the GC_locker-initiated GC is performed,
1031         // and then retry the allocation.
1032         if (GC_locker::needs_gc()) {
1033           should_try_gc = false;
1034         } else {
1035           // Read the GC count while still holding the Heap_lock.
1036           gc_count_before = total_collections();
1037           should_try_gc = true;
1038         }
1039       }
1040     }
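
The change in this hunk is the renamed policy call: humongous objects are allocated directly in old regions, so their full region footprint is reported to the policy as old-gen allocation since the last GC. A minimal sketch of what such an accumulator could look like on the policy side (the field name and the reset point are assumptions based on this diff, not the actual implementation):

    // Hypothetical policy-side accumulator (names assumed for illustration).
    class OldGenAllocTracker {
      size_t _bytes_allocated_in_old_since_last_gc;
     public:
      OldGenAllocTracker() : _bytes_allocated_in_old_since_last_gc(0) {}

      // Called from the three paths in this diff: humongous allocation,
      // evacuation into old regions, and regions that failed evacuation.
      void add_bytes_allocated_in_old_since_last_gc(size_t bytes) {
        _bytes_allocated_in_old_since_last_gc += bytes;
      }

      // Presumably read and cleared at each GC, so adaptive-IHOP logic
      // can relate old-gen growth to the mutator time between GCs.
      size_t reset() {
        size_t bytes = _bytes_allocated_in_old_since_last_gc;
        _bytes_allocated_in_old_since_last_gc = 0;
        return bytes;
      }
    };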


5224 
5225   _survivor_evac_stats.adjust_desired_plab_sz();
5226   _old_evac_stats.adjust_desired_plab_sz();
5227 
5228   // Reset and re-enable the hot card cache.
5229   // Note the counts for the cards in the regions in the
5230   // collection set are reset when the collection set is freed.
5231   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5232   hot_card_cache->reset_hot_cache();
5233   hot_card_cache->set_use_cache(true);
5234 
5235   purge_code_root_memory();
5236 
5237   redirty_logged_cards();
5238 #if defined(COMPILER2) || INCLUDE_JVMCI
5239   DerivedPointerTable::update_pointers();
5240 #endif
5241 }
5242 
5243 void G1CollectedHeap::record_obj_copy_mem_stats() {
5244   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
5245 
5246   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5247                                                create_g1_evac_summary(&_old_evac_stats));
5248 }
5249 
5250 void G1CollectedHeap::free_region(HeapRegion* hr,
5251                                   FreeRegionList* free_list,
5252                                   bool par,
5253                                   bool locked) {
5254   assert(!hr->is_free(), "the region should not be free");
5255   assert(!hr->is_empty(), "the region should not be empty");
5256   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5257   assert(free_list != NULL, "pre-condition");
5258 
5259   if (G1VerifyBitmaps) {
5260     MemRegion mr(hr->bottom(), hr->end());
5261     concurrent_mark()->clearRangePrevBitmap(mr);
5262   }
5263 
5264   // Clear the card counts for this region.
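Note that record_obj_copy_mem_stats() above deals in heap words: the evacuation statistics track allocation in words, hence the HeapWordSize multiplication before the figure reaches the policy, which accounts in bytes. A simplified sketch of the bookkeeping involved (the real G1EvacStats tracks more fields and updates them atomically; this shows only the pieces used in this diff):

    // Simplified evacuation statistics, counted in heap words.
    struct EvacStatsSketch {
      size_t _allocated     = 0;  // words allocated for copying live objects
      size_t _failure_used  = 0;  // live words left behind in failed regions
      size_t _failure_waste = 0;  // non-live words in failed regions

      void add_failure_used_and_waste(size_t used, size_t waste) {
        _failure_used  += used;
        _failure_waste += waste;
      }
      size_t allocated() const { return _allocated; }
    };

    // Reporting to the policy converts words to bytes, as above:
    //   g1_policy()->add_bytes_allocated_in_old_since_last_gc(
    //       stats.allocated() * HeapWordSize);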


5616 
5617     if (!cur->evacuation_failed()) {
5618       MemRegion used_mr = cur->used_region();
5619 
5620       // And the region is not empty.
5621       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5622       pre_used += cur->used();
5623       free_region(cur, &local_free_list, false /* par */, true /* locked */);
5624     } else {
5625       cur->uninstall_surv_rate_group();
5626       if (cur->is_young()) {
5627         cur->set_young_index_in_cset(-1);
5628       }
5629       cur->set_evacuation_failed(false);
5630       // When moving a young gen region to old gen, we "allocate" that whole region
5631       // there. This is in addition to any already evacuated objects. Notify the
5632       // policy about that.
5633       // Old gen regions do not cause an additional allocation: both the objects
5634       // still in the region and the ones already moved are accounted for elsewhere.
5635       if (cur->is_young()) {
5636         policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
5637       }
5638       // The region is now considered to be old.
5639       cur->set_old();
5640       // Do some allocation statistics accounting. Regions that failed evacuation
5641       // are always made old, so there is no need to update anything in the young
5642       // gen statistics, but we need to update old gen statistics.
5643       size_t used_words = cur->marked_bytes() / HeapWordSize;
5644       _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
5645       _old_set.add(cur);
5646       evacuation_info.increment_collectionset_used_after(cur->used());
5647     }
5648     cur = next;
5649   }
5650 
5651   evacuation_info.set_regions_freed(local_free_list.length());
5652   policy->record_max_rs_lengths(rs_lengths);
5653   policy->cset_regions_freed();
5654 
5655   double end_sec = os::elapsedTime();
5656   double elapsed_ms = (end_sec - start_sec) * 1000.0;
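
To make the failure accounting above concrete, here is a worked example with illustrative numbers only (a 1 MB region on a 64-bit VM; the actual GrainBytes value depends on heap size and flags):

    const size_t GrainBytes   = 1024 * 1024;                // assumed region size
    const size_t HeapWordSize = 8;                          // 64-bit heap words
    const size_t GrainWords   = GrainBytes / HeapWordSize;  // 131072 words

    size_t marked_bytes = 512 * 1024;                       // 512 KB of live data
    size_t used_words   = marked_bytes / HeapWordSize;      // 65536 words used
    size_t waste_words  = GrainWords - used_words;          // 65536 words wasted

    // These are the two arguments to add_failure_used_and_waste(): the
    // failed region is retained in old gen in its entirety, so every word
    // not holding marked (live) data is charged as waste.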

