// ...
    hr->set_top(hr->end());
    if (_hr_printer.is_active()) {
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
    }
  }

  assert(hr == NULL || (hr->bottom() < obj_top && obj_top <= hr->end()),
         "obj_top should be in last region");

  check_bitmaps("Humongous Region Allocation", first_hr);

  increase_used(word_size_sum * HeapWordSize);

  for (uint i = first; i < last; ++i) {
    _humongous_set.add(region_at(i));
  }

  return new_obj;
}
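
// Illustrative layout sketch, not part of the original file: a humongous
// object spanning three regions. The first region is tagged StartsHumongous
// and the rest ContinuesHumongous; top of every fully covered region is set
// to end(), and obj_top (the end of the object) falls inside the last
// region, which is what the assert above checks:
//
//   | StartsHumongous   | ContinuesHumongous | ContinuesHumongous      |
//   ^ new_obj                                            obj_top ^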

size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
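
// Illustrative sketch, not part of the original file: the helper above is a
// ceiling division of the object size by the region size in words. The
// standalone function below shows the same computation with an assumed 1 MB
// region of 131072 eight-byte words (the real constant is
// HeapRegion::GrainWords, and HeapWordSize is 8 only on 64-bit VMs). For
// example, a 327680-word (2.5 MB) object rounds up to 3 regions.
static size_t example_humongous_obj_size_in_regions(size_t word_size) {
  const size_t grain_words = 131072; // assumption: 1 MB region, 8-byte words
  return (word_size + grain_words - 1) / grain_words; // ceil(word_size / grain_words)
}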

// If the object could fit into free regions without expansion, try that.
// Otherwise, if the heap can expand, do so.
// Otherwise, if using expanded regions might help, try with expanded regions
// given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  if (obj_regions == 1) {
    // Only one region to allocate; try to use a fast path by directly
    // allocating from the free lists. Do not try to expand here, we will
    // potentially do that later.
    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // We cannot allocate a humongous object spanning more than one region
    // while cleanupComplete() is running, since some of the regions we would
    // find to be empty might not yet have been added to the free list. It is
    // not straightforward to know which list they are on, so we cannot simply
    // remove them. We only need to do this when the request spans more than
    // one region; the one-region allocation path above already potentially
    // waits for regions from the secondary free list.
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();
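
    // Illustrative outline only; the code that performs the search is elided
    // from this excerpt, and the names below are stand-ins rather than the
    // real API (only G1_NO_HRM_INDEX and obj_regions are real). Conceptually
    // the multi-region path continues as:
    //
    //   first = find_contiguous_free_regions(obj_regions);   // hypothetical
    //   if (first == G1_NO_HRM_INDEX) {
    //     // No contiguous run available: per the strategy comment above,
    //     // try to expand the heap and search again before failing.
    //   }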

// ...
    collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until either a) we manage to perform the allocation, or
  // b) we successfully schedule a collection which then fails to perform the
  // allocation. Case b) is the only one in which we will return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young regions, we
      // first try to do the allocation without doing a collection, hoping
      // that there is enough space in the heap.
      result = humongous_obj_allocate(word_size, AllocationContext::current());
      if (result != NULL) {
        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
        g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // The GCLocker may not be active, but the GCLocker-initiated GC may
        // not yet have been performed (GC_locker::needs_gc() returns true).
        // In this case we do not try this GC; we wait until the
        // GCLocker-initiated GC is performed, and then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }
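
    // Illustrative sketch of the protocol above, not part of the original
    // source; the pseudocode names are informal. The collection count is
    // sampled while Heap_lock is held, so a collection that completes between
    // the lock release and the GC request makes that request stale, and the
    // loop simply retries the allocation:
    //
    //   for (;;) {
    //     { // with Heap_lock held
    //       if the allocation succeeds: record old gen bytes and return;
    //       if the GC locker is active or a locker-initiated GC is pending:
    //         should_try_gc = false;              // let that GC happen first
    //       else:
    //         gc_count_before = total_collections(); should_try_gc = true;
    //     } // Heap_lock released
    //     if should_try_gc: schedule a collection tagged with gc_count_before;
    //     else: stall until the locker-initiated GC has completed;
    //     // in either case, loop around and retry the allocation
    //   }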

// ...

  _survivor_evac_stats.adjust_desired_plab_sz();
  _old_evac_stats.adjust_desired_plab_sz();

  // Reset and re-enable the hot card cache.
  // Note that the counts for the cards in the regions in the collection set
  // are reset when the collection set is freed.
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->reset_hot_cache();
  hot_card_cache->set_use_cache(true);

  purge_code_root_memory();

  redirty_logged_cards();
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void G1CollectedHeap::record_obj_copy_mem_stats() {
  g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);

  _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                               create_g1_evac_summary(&_old_evac_stats));
}

void G1CollectedHeap::free_region(HeapRegion* hr,
                                  FreeRegionList* free_list,
                                  bool par,
                                  bool locked) {
  assert(!hr->is_free(), "the region should not be free");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
  assert(free_list != NULL, "pre-condition");

  if (G1VerifyBitmaps) {
    MemRegion mr(hr->bottom(), hr->end());
    concurrent_mark()->clearRangePrevBitmap(mr);
  }

  // Clear the card counts for this region.
  // Note: we only need to do this if the region is not young

// ...
      assert(index == -1, "invariant");
    }

    assert((cur->is_young() && cur->young_index_in_cset() > -1) ||
           (!cur->is_young() && cur->young_index_in_cset() == -1),
           "invariant");

    if (!cur->evacuation_failed()) {
      MemRegion used_mr = cur->used_region();

      // The region must not be empty: regions in the collection set always
      // contain data.
      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
      pre_used += cur->used();
      free_region(cur, &local_free_list, false /* par */, true /* locked */);
    } else {
      cur->uninstall_surv_rate_group();
      if (cur->is_young()) {
        cur->set_young_index_in_cset(-1);
      }
      cur->set_evacuation_failed(false);
      // When moving a young gen region to old gen, we "allocate" that whole
      // region there. This is in addition to any already evacuated objects.
      // Notify the policy about that. Old gen regions do not cause an
      // additional allocation: both the objects still in the region and the
      // ones already moved are accounted for elsewhere.
      if (cur->is_young()) {
        policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
      }
      // The region is now considered to be old.
      cur->set_old();
      // Do some allocation statistics accounting. Regions that failed
      // evacuation are always made old, so there is no need to update anything
      // in the young gen statistics, but we need to update old gen statistics.
      size_t used_words = cur->marked_bytes() / HeapWordSize;
      _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
      _old_set.add(cur);
      evacuation_info.increment_collectionset_used_after(cur->used());
    }
    cur = next;
  }
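
  // Worked example of the failure accounting above (illustrative; assumes a
  // 64-bit VM where HeapWordSize == 8 and 1 MB regions, i.e.
  // HeapRegion::GrainWords == 131072): a failed region with
  // marked_bytes() == 786432 (768 KB) yields used_words == 98304, so the
  // waste charged to the old gen stats is 131072 - 98304 == 32768 words
  // (256 KB), the tail of the region that stays occupied only because the
  // whole region is retained in old gen.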

  evacuation_info.set_regions_freed(local_free_list.length());
  policy->record_max_rs_lengths(rs_lengths);
  policy->cset_regions_freed();

  double end_sec = os::elapsedTime();
  double elapsed_ms = (end_sec - start_sec) * 1000.0;