385 // not last one
386 assert(obj_top > hr->end(), "obj_top should be above this region");
387 hr->set_top(hr->end());
388 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
389 }
390 }
391 // If we have a continues humongous region (hr != NULL), its top should
392 // match obj_top.
393 assert(hr == NULL || (hr->top() == obj_top), "sanity");
394 check_bitmaps("Humongous Region Allocation", first_hr);
395
396 increase_used(word_size * HeapWordSize);
397
398 for (uint i = first; i < last; ++i) {
399 _humongous_set.add(region_at(i));
400 }
401
402 return new_obj;
403 }
404
405 // If the object could fit into free regions w/o expansion, try that.
406 // Otherwise, if we can expand the heap, do so.
407 // Otherwise, if using ex regions might help, try with ex given back.
408 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
409 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
410
411 verify_region_sets_optional();
412
413 uint first = G1_NO_HRM_INDEX;
414 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
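// Worked example (illustrative only; assumes 1 MB regions, so on a 64-bit VM
// HeapRegion::GrainWords == 131072): a 300000-word request yields
// align_size_up_(300000, 131072) / 131072 == 393216 / 131072 == 3 regions.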
415
416 if (obj_regions == 1) {
417 // Only one region to allocate; try to use a fast path by allocating directly
418 // from the free lists. Do not try to expand here; we will potentially do that
419 // later.
420 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
421 if (hr != NULL) {
422 first = hr->hrm_index();
423 }
424 } else {
425 // We can't allocate a humongous object spanning more than one region while
426 // cleanupComplete() is running, since some of the regions we find to be
427 // empty might not yet be added to the free list. It is not straightforward
428 // to know which list they are on, so we cannot easily remove them. We only
429 // need to do this if we need more than one region to satisfy the current
430 // humongous allocation request. If we are allocating only one region we use
431 // the single-region allocation code (see above), which already potentially
432 // waits for regions from the secondary free list.
433 wait_while_free_regions_coming();
434 append_secondary_free_list_if_not_empty_with_lock();
994 collect(GCCause::_g1_humongous_allocation);
995 }
996
997 // We will loop until a) we manage to successfully perform the
998 // allocation or b) we successfully schedule a collection which
999 // fails to perform the allocation. b) is the only case when we'll
1000 // return NULL.
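// In outline (a sketch of the loop below, not additional behavior): each
// iteration takes the Heap_lock and retries humongous_obj_allocate(); on
// failure it either waits out a pending GCLocker-initiated GC or snapshots
// total_collections() and schedules a collection keyed on that count. NULL
// is returned only once a successfully scheduled GC still leaves no room.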
1001 HeapWord* result = NULL;
1002 for (int try_count = 1; /* we'll return */; try_count += 1) {
1003 bool should_try_gc;
1004 uint gc_count_before;
1005
1006 {
1007 MutexLockerEx x(Heap_lock);
1008
1009 // Given that humongous objects are not allocated in young
1010 // regions, we'll first try to do the allocation without doing a
1011 // collection, hoping that there's enough space in the heap.
1012 result = humongous_obj_allocate(word_size, AllocationContext::current());
1013 if (result != NULL) {
1014 return result;
1015 }
1016
1017 if (GC_locker::is_active_and_needs_gc()) {
1018 should_try_gc = false;
1019 } else {
1020 // The GCLocker may not be active, but the GCLocker-initiated
1021 // GC may not yet have been performed (GCLocker::needs_gc()
1022 // returns true). In this case we do not try this GC; instead we
1023 // wait until the GCLocker-initiated GC is performed, and
1024 // then retry the allocation.
1025 if (GC_locker::needs_gc()) {
1026 should_try_gc = false;
1027 } else {
1028 // Read the GC count while still holding the Heap_lock.
1029 gc_count_before = total_collections();
1030 should_try_gc = true;
1031 }
1032 }
1033 }
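// Decision summary (an editorial sketch of the block above): should_try_gc
// is true only when the GCLocker is neither active-and-needing-gc nor has a
// pending GC (needs_gc()); gc_count_before is read while still holding the
// Heap_lock so a later collection request can detect that another GC ran in
// the meantime.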
5227
5228 _survivor_evac_stats.adjust_desired_plab_sz();
5229 _old_evac_stats.adjust_desired_plab_sz();
5230
5231 // Reset and re-enable the hot card cache.
5232 // Note that the card counts for the collection set regions are
5233 // reset when the collection set is freed.
5234 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5235 hot_card_cache->reset_hot_cache();
5236 hot_card_cache->set_use_cache(true);
5237
5238 purge_code_root_memory();
5239
5240 redirty_logged_cards();
5241 #if defined(COMPILER2) || INCLUDE_JVMCI
5242 DerivedPointerTable::update_pointers();
5243 #endif
5244 }
5245
5246 void G1CollectedHeap::record_obj_copy_mem_stats() {
5247 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5248 create_g1_evac_summary(&_old_evac_stats));
5249 }
5250
5251 void G1CollectedHeap::free_region(HeapRegion* hr,
5252 FreeRegionList* free_list,
5253 bool par,
5254 bool locked) {
5255 assert(!hr->is_free(), "the region should not be free");
5256 assert(!hr->is_empty(), "the region should not be empty");
5257 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5258 assert(free_list != NULL, "pre-condition");
5259
5260 if (G1VerifyBitmaps) {
5261 MemRegion mr(hr->bottom(), hr->end());
5262 concurrent_mark()->clearRangePrevBitmap(mr);
5263 }
5264
5265 // Clear the card counts for this region.
5266 // Note: we only need to do this if the region is not young
5611 assert(index == -1, "invariant");
5612 }
5613
5614 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5615 (!cur->is_young() && cur->young_index_in_cset() == -1),
5616 "invariant" );
5617
5618 if (!cur->evacuation_failed()) {
5619 MemRegion used_mr = cur->used_region();
5620
5621 // A region in the collection set should never be empty.
5622 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5623 pre_used += cur->used();
5624 free_region(cur, &local_free_list, false /* par */, true /* locked */);
5625 } else {
5626 cur->uninstall_surv_rate_group();
5627 if (cur->is_young()) {
5628 cur->set_young_index_in_cset(-1);
5629 }
5630 cur->set_evacuation_failed(false);
5631 // The region is now considered to be old.
5632 cur->set_old();
5633 // Do some allocation statistics accounting. Regions that failed evacuation
5634 // are always made old, so there is no need to update anything in the young
5635 // gen statistics, but we need to update old gen statistics.
5636 size_t used_words = cur->marked_bytes() / HeapWordSize;
5637 _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
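// Worked example (illustrative only; assumes 1 MB regions, GrainWords ==
// 131072): a failed region with marked_bytes() == 800000 contributes
// used_words == 100000 and waste == 131072 - 100000 == 31072 words.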
5638 _old_set.add(cur);
5639 evacuation_info.increment_collectionset_used_after(cur->used());
5640 }
5641 cur = next;
5642 }
5643
5644 evacuation_info.set_regions_freed(local_free_list.length());
5645 policy->record_max_rs_lengths(rs_lengths);
5646 policy->cset_regions_freed();
5647
5648 double end_sec = os::elapsedTime();
5649 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5650
// ===== The same hunks, with the patch applied =====
385 // not last one
386 assert(obj_top > hr->end(), "obj_top should be above this region");
387 hr->set_top(hr->end());
388 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
389 }
390 }
391 // If we have a continues humongous region (hr != NULL), its top should
392 // match obj_top.
393 assert(hr == NULL || (hr->top() == obj_top), "sanity");
394 check_bitmaps("Humongous Region Allocation", first_hr);
395
396 increase_used(word_size * HeapWordSize);
397
398 for (uint i = first; i < last; ++i) {
399 _humongous_set.add(region_at(i));
400 }
401
402 return new_obj;
403 }
404
405 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
406 assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
407 return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
408 }
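// For example (illustrative only; assumes 1 MB regions, GrainWords ==
// 131072): humongous_obj_size_in_regions(300000) == 3, since align_size_up_
// rounds 300000 words up to 393216 == 3 * 131072. Requests at or below half
// a region are not humongous and would trip the assert.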
409
410 // If the object could fit into free regions w/o expansion, try that.
411 // Otherwise, if we can expand the heap, do so.
412 // Otherwise, if using ex regions might help, try with ex given back.
413 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
414 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
415
416 verify_region_sets_optional();
417
418 uint first = G1_NO_HRM_INDEX;
419 uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
420
421 if (obj_regions == 1) {
422 // Only one region to allocate; try to use a fast path by allocating directly
423 // from the free lists. Do not try to expand here; we will potentially do that
424 // later.
425 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
426 if (hr != NULL) {
427 first = hr->hrm_index();
428 }
429 } else {
430 // We can't allocate a humongous object spanning more than one region while
431 // cleanupComplete() is running, since some of the regions we find to be
432 // empty might not yet be added to the free list. It is not straightforward
433 // to know which list they are on, so we cannot easily remove them. We only
434 // need to do this if we need more than one region to satisfy the current
435 // humongous allocation request. If we are allocating only one region we use
436 // the single-region allocation code (see above), which already potentially
437 // waits for regions from the secondary free list.
438 wait_while_free_regions_coming();
439 append_secondary_free_list_if_not_empty_with_lock();
999 collect(GCCause::_g1_humongous_allocation);
1000 }
1001
1002 // We will loop until a) we manage to successfully perform the
1003 // allocation or b) we successfully schedule a collection which
1004 // fails to perform the allocation. b) is the only case when we'll
1005 // return NULL.
1006 HeapWord* result = NULL;
1007 for (int try_count = 1; /* we'll return */; try_count += 1) {
1008 bool should_try_gc;
1009 uint gc_count_before;
1010
1011 {
1012 MutexLockerEx x(Heap_lock);
1013
1014 // Given that humongous objects are not allocated in young
1015 // regions, we'll first try to do the allocation without doing a
1016 // collection, hoping that there's enough space in the heap.
1017 result = humongous_obj_allocate(word_size, AllocationContext::current());
1018 if (result != NULL) {
1019 g1_policy()->add_last_old_allocated_bytes(humongous_obj_size_in_regions(word_size) * HeapRegion::GrainBytes);
1020 return result;
1021 }
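// Note that whole regions are reported to the policy above: the accounting
// charges humongous_obj_size_in_regions(word_size) * GrainBytes, which
// includes any unused tail of the last region, not just word_size itself.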
1022
1023 if (GC_locker::is_active_and_needs_gc()) {
1024 should_try_gc = false;
1025 } else {
1026 // The GCLocker may not be active, but the GCLocker-initiated
1027 // GC may not yet have been performed (GCLocker::needs_gc()
1028 // returns true). In this case we do not try this GC; instead we
1029 // wait until the GCLocker-initiated GC is performed, and
1030 // then retry the allocation.
1031 if (GC_locker::needs_gc()) {
1032 should_try_gc = false;
1033 } else {
1034 // Read the GC count while still holding the Heap_lock.
1035 gc_count_before = total_collections();
1036 should_try_gc = true;
1037 }
1038 }
1039 }
5233
5234 _survivor_evac_stats.adjust_desired_plab_sz();
5235 _old_evac_stats.adjust_desired_plab_sz();
5236
5237 // Reset and re-enable the hot card cache.
5238 // Note that the card counts for the collection set regions are
5239 // reset when the collection set is freed.
5240 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5241 hot_card_cache->reset_hot_cache();
5242 hot_card_cache->set_use_cache(true);
5243
5244 purge_code_root_memory();
5245
5246 redirty_logged_cards();
5247 #if defined(COMPILER2) || INCLUDE_JVMCI
5248 DerivedPointerTable::update_pointers();
5249 #endif
5250 }
5251
5252 void G1CollectedHeap::record_obj_copy_mem_stats() {
5253 g1_policy()->add_last_old_allocated_bytes(_old_evac_stats.allocated() * HeapWordSize);
5254
5255 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5256 create_g1_evac_summary(&_old_evac_stats));
5257 }
5258
5259 void G1CollectedHeap::free_region(HeapRegion* hr,
5260 FreeRegionList* free_list,
5261 bool par,
5262 bool locked) {
5263 assert(!hr->is_free(), "the region should not be free");
5264 assert(!hr->is_empty(), "the region should not be empty");
5265 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5266 assert(free_list != NULL, "pre-condition");
5267
5268 if (G1VerifyBitmaps) {
5269 MemRegion mr(hr->bottom(), hr->end());
5270 concurrent_mark()->clearRangePrevBitmap(mr);
5271 }
5272
5273 // Clear the card counts for this region.
5274 // Note: we only need to do this if the region is not young
5619 assert(index == -1, "invariant");
5620 }
5621
5622 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5623 (!cur->is_young() && cur->young_index_in_cset() == -1),
5624 "invariant" );
5625
5626 if (!cur->evacuation_failed()) {
5627 MemRegion used_mr = cur->used_region();
5628
5629 // A region in the collection set should never be empty.
5630 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5631 pre_used += cur->used();
5632 free_region(cur, &local_free_list, false /* par */, true /* locked */);
5633 } else {
5634 cur->uninstall_surv_rate_group();
5635 if (cur->is_young()) {
5636 cur->set_young_index_in_cset(-1);
5637 }
5638 cur->set_evacuation_failed(false);
5639 // When moving a young gen region to old gen, we "allocate" that whole region
5640 // there. This is in addition to any already evacuated objects. Notify the
5641 // policy about that.
5642 // Old gen regions do not cause an additional allocation: both the objects
5643 // still in the region and the ones already moved are accounted for elsewhere.
5644 if (cur->is_young()) {
5645 policy->add_last_old_allocated_bytes(HeapRegion::GrainBytes);
5646 }
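// Illustrative magnitude (assuming 1 MB regions): each young region that
// failed evacuation adds a full HeapRegion::GrainBytes (1 MB here) to the
// policy's old-gen allocation figure, regardless of how much of the region
// is actually live.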
5647 // The region is now considered to be old.
5648 cur->set_old();
5649 // Do some allocation statistics accounting. Regions that failed evacuation
5650 // are always made old, so there is no need to update anything in the young
5651 // gen statistics, but we need to update old gen statistics.
5652 size_t used_words = cur->marked_bytes() / HeapWordSize;
5653 _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
5654 _old_set.add(cur);
5655 evacuation_info.increment_collectionset_used_after(cur->used());
5656 }
5657 cur = next;
5658 }
5659
5660 evacuation_info.set_regions_freed(local_free_list.length());
5661 policy->record_max_rs_lengths(rs_lengths);
5662 policy->cset_regions_freed();
5663
5664 double end_sec = os::elapsedTime();
5665 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5666