
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 56448 : imported patch 8220310.mut.0
rev 56449 : imported patch 8220310.mut.1_thomas
rev 56451 : imported patch 8220310.mut.1-3_kim
rev 56452 : imported patch 8220310.mut.2-stefan
rev 56453 : imported patch 8220310.mut.2-kim
rev 56454 : [mq]: 8220310.mut.2-evensplit

*** 167,182 ****
    return new HeapRegion(hrs_index, bot(), mr);
  }
  
  // Private methods.
  
! HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
    assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
           "the only time we use this to allocate a humongous region is "
           "when we are allocating a single humongous region");
  
!   HeapRegion* res = _hrm->allocate_free_region(type);
  
    if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
      // Currently, only attempts to allocate GC alloc regions set
      // do_expand to true. So, we should only reach here during a
      // safepoint. If this assumption changes we might have to
--- 167,185 ----
    return new HeapRegion(hrs_index, bot(), mr);
  }
  
  // Private methods.
  
! HeapRegion* G1CollectedHeap::new_region(size_t word_size,
!                                         HeapRegionType type,
!                                         bool do_expand,
!                                         uint node_index) {
    assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
           "the only time we use this to allocate a humongous region is "
           "when we are allocating a single humongous region");
  
!   HeapRegion* res = _hrm->allocate_free_region(type, node_index);
  
    if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
      // Currently, only attempts to allocate GC alloc regions set
      // do_expand to true. So, we should only reach here during a
      // safepoint. If this assumption changes we might have to
*** 184,199 ****
      assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  
      log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);
  
!     if (expand(word_size * HeapWordSize)) {
!       // Given that expand() succeeded in expanding the heap, and we
        // always expand the heap by an amount aligned to the heap
        // region size, the free list should in theory not be empty.
        // In either case allocate_free_region() will check for NULL.
!       res = _hrm->allocate_free_region(type);
      } else {
        _expand_heap_after_alloc_failure = false;
      }
    }
    return res;
--- 187,205 ----
      assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  
      log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);
  
!     assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
!            "This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
!            word_size * HeapWordSize);
!     if (expand_single_region(node_index)) {
!       // Given that expand_single_region() succeeded in expanding the heap, and we
        // always expand the heap by an amount aligned to the heap
        // region size, the free list should in theory not be empty.
        // In either case allocate_free_region() will check for NULL.
!       res = _hrm->allocate_free_region(type, node_index);
      } else {
        _expand_heap_after_alloc_failure = false;
      }
    }
    return res;
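Taken together, the two hunks above change new_region() from a general expand() call into a single-region, NUMA-preferred expansion followed by one retry of the free-list allocation, which matches the new assert that such requests never span more than one region. A minimal standalone sketch of that retry shape (Region, RegionManager and the node bookkeeping are illustrative stand-ins, not the HotSpot API):

#include <cstdio>
#include <vector>

// Illustrative stand-ins for HeapRegionManager bookkeeping; the names
// mirror the patch but this is not the real HotSpot API.
struct Region {
  unsigned node;
  bool     free;
};

struct RegionManager {
  std::vector<Region> regions;
  size_t max_regions = 8;

  // Hand out a free region already committed on the requested node.
  Region* allocate_free_region(unsigned node_index) {
    for (Region& r : regions) {
      if (r.free && r.node == node_index) {
        r.free = false;
        return &r;
      }
    }
    return nullptr;
  }

  // Commit exactly one more region on the preferred node; returns the
  // number of regions expanded (0 when the heap is fully expanded).
  unsigned expand_on_preferred_node(unsigned node_index) {
    if (regions.size() >= max_regions) {
      return 0;
    }
    regions.push_back(Region{node_index, true});
    return 1;
  }
};

// The retry shape from new_region(): try the free list first; on failure
// expand by a single region on the preferred node and try once more.
Region* new_region(RegionManager& hrm, unsigned node_index) {
  Region* res = hrm.allocate_free_region(node_index);
  if (res == nullptr && hrm.expand_on_preferred_node(node_index) > 0) {
    res = hrm.allocate_free_region(node_index);
  }
  return res;
}

int main() {
  RegionManager hrm;
  Region* r = new_region(hrm, 0); // free list empty -> expands once, then succeeds
  std::printf("got region on node %u\n", r != nullptr ? r->node : 0u);
  return 0;
}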
*** 1020,1030 ****
    concurrent_mark()->concurrent_cycle_abort();
  }
  
  void G1CollectedHeap::prepare_heap_for_full_collection() {
    // Make sure we'll choose a new allocation region afterwards.
!   _allocator->release_mutator_alloc_region();
    _allocator->abandon_gc_alloc_regions();
  
    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
--- 1026,1036 ----
    concurrent_mark()->concurrent_cycle_abort();
  }
  
  void G1CollectedHeap::prepare_heap_for_full_collection() {
    // Make sure we'll choose a new allocation region afterwards.
!   _allocator->release_mutator_alloc_regions();
    _allocator->abandon_gc_alloc_regions();
  
    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
*** 1064,1074 ****
    purge_code_root_memory();
  
    // Start a new incremental collection set for the next pause
    start_new_collection_set();
  
!   _allocator->init_mutator_alloc_region();
  
    // Post collection state updates.
    MetaspaceGC::compute_new_size();
  }
  
--- 1070,1080 ----
    purge_code_root_memory();
  
    // Start a new incremental collection set for the next pause
    start_new_collection_set();
  
!   _allocator->init_mutator_alloc_regions();
  
    // Post collection state updates.
    MetaspaceGC::compute_new_size();
  }
  
*** 1381,1401 ****
      }
    }
    return regions_to_expand > 0;
  }
  
  void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
    size_t aligned_shrink_bytes =
      ReservedSpace::page_align_size_down(shrink_bytes);
    aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                      HeapRegion::GrainBytes);
    uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  
    uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
    size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
- 
    log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                              shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  
    if (num_regions_removed > 0) {
      policy()->record_new_heap_size(num_regions());
    } else {
--- 1387,1421 ----
      }
    }
    return regions_to_expand > 0;
  }
  
+ bool G1CollectedHeap::expand_single_region(uint node_index) {
+   log_debug(gc, ergo, heap)("Expand the heap by a single region");
+ 
+   uint expanded_by = _hrm->expand_on_preferred_node(node_index);
+ 
+   if (expanded_by == 0) {
+     assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
+     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
+     return false;
+   }
+ 
+   policy()->record_new_heap_size(num_regions());
+   return true;
+ }
+ 
  void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
    size_t aligned_shrink_bytes =
      ReservedSpace::page_align_size_down(shrink_bytes);
    aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                      HeapRegion::GrainBytes);
    uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  
    uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
    size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
    log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                              shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  
    if (num_regions_removed > 0) {
      policy()->record_new_heap_size(num_regions());
    } else {
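The new expand_single_region() delegates the actual work to HeapRegionManager::expand_on_preferred_node(), which reports how many regions it committed; when that is zero, the heap must already be fully expanded. One plausible preferred-node-with-fallback policy for that call (an assumption for illustration; the HeapRegionManager side of the patch is not shown on this page):

#include <cstdio>
#include <vector>

// Illustrative model: commit a region whose memory sits on the preferred
// NUMA node if possible, otherwise fall back to any node rather than
// fail the expansion outright. Not the actual HotSpot implementation.
struct Slot {
  unsigned node;
  bool     committed;
};

unsigned expand_on_preferred_node(std::vector<Slot>& slots, unsigned preferred) {
  // First pass: an uncommitted region on the preferred node.
  for (Slot& s : slots) {
    if (!s.committed && s.node == preferred) {
      s.committed = true;
      return 1;
    }
  }
  // Second pass: any uncommitted region is better than failing.
  for (Slot& s : slots) {
    if (!s.committed) {
      s.committed = true;
      return 1;
    }
  }
  return 0; // nothing left: the caller asserts is_maximal_no_gc() and returns false
}

int main() {
  std::vector<Slot> slots = { {0, true}, {1, false} };
  // Node 0 is exhausted, so this falls back to committing on node 1.
  std::printf("expanded_by = %u\n", expand_on_preferred_node(slots, 0));
  return 0;
}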
*** 1493,1502 ****
--- 1513,1523 ----
    _old_set("Old Region Set", new OldRegionSetChecker()),
    _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
    _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
    _bot(NULL),
    _listener(),
+   _mem_node_mgr(G1MemoryNodeManager::create()),
    _hrm(NULL),
    _allocator(NULL),
    _verifier(NULL),
    _summary_bytes_used(0),
    _archive_allocator(NULL),
*** 1776,1785 ****
--- 1797,1808 ----
    if (_workers == NULL) {
      return JNI_ENOMEM;
    }
    _workers->initialize_workers();
  
+   _mem_node_mgr->set_page_size(page_size);
+ 
    // Create the G1ConcurrentMark data structure and thread.
    // (Must do this late, so that "max_regions" is defined.)
    _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
    if (_cm == NULL || !_cm->completed_initialization()) {
      vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
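These two hunks introduce the G1MemoryNodeManager in two steps: the manager is created early, in the constructor's initializer list, while the page size is pushed in later during initialization, once it is known. A hedged sketch of that split; every name below is an illustrative assumption, not the actual G1MemoryNodeManager interface:

#include <cstdio>
#include <cstddef>

// Sketch of the shape implied above: a factory picks a NUMA-aware
// manager when NUMA is usable and a trivial single-node one otherwise,
// and the page size is injected after heap setup has decided it.
class MemoryNodeManager {
public:
  virtual ~MemoryNodeManager() {}
  virtual unsigned num_active_nodes() const { return 1; }
  virtual void set_page_size(size_t page_size) { (void)page_size; }
  static MemoryNodeManager* create(bool use_numa);
};

class NumaMemoryNodeManager : public MemoryNodeManager {
  size_t _page_size;
public:
  NumaMemoryNodeManager() : _page_size(0) {}
  unsigned num_active_nodes() const override { return 2; } // e.g. two sockets
  void set_page_size(size_t page_size) override { _page_size = page_size; }
};

MemoryNodeManager* MemoryNodeManager::create(bool use_numa) {
  // The real create() would probe the OS; here the choice is a parameter.
  return use_numa ? static_cast<MemoryNodeManager*>(new NumaMemoryNodeManager())
                  : new MemoryNodeManager();
}

int main() {
  MemoryNodeManager* mgr = MemoryNodeManager::create(true);
  mgr->set_page_size(4096); // mirrors _mem_node_mgr->set_page_size(page_size)
  std::printf("active nodes: %u\n", mgr->num_active_nodes());
  delete mgr;
  return 0;
}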
*** 1823,1833 ****
    dummy_region->set_eden();
    // Make sure it's full.
    dummy_region->set_top(dummy_region->end());
    G1AllocRegion::setup(this, dummy_region);
  
!   _allocator->init_mutator_alloc_region();
  
    // Do create of the monitoring and management support so that
    // values in the heap have been properly initialized.
    _g1mm = new G1MonitoringSupport(this);
  
--- 1846,1856 ----
    dummy_region->set_eden();
    // Make sure it's full.
    dummy_region->set_top(dummy_region->end());
    G1AllocRegion::setup(this, dummy_region);
  
!   _allocator->init_mutator_alloc_regions();
  
    // Do create of the monitoring and management support so that
    // values in the heap have been properly initialized.
    _g1mm = new G1MonitoringSupport(this);
  
*** 3006,3016 ****
  
    policy()->record_collection_pause_start(sample_start_time_sec);
  
    // Forget the current allocation region (we might even choose it to be part
    // of the collection set!).
!   _allocator->release_mutator_alloc_region();
  
    calculate_collection_set(evacuation_info, target_pause_time_ms);
  
    G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
    G1ParScanThreadStateSet per_thread_states(this,
--- 3029,3039 ----
  
    policy()->record_collection_pause_start(sample_start_time_sec);
  
    // Forget the current allocation region (we might even choose it to be part
    // of the collection set!).
!   _allocator->release_mutator_alloc_regions();
  
    calculate_collection_set(evacuation_info, target_pause_time_ms);
  
    G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
    G1ParScanThreadStateSet per_thread_states(this,
*** 3043,3053 ****
          // the current thread has completed its logging output.
        }
  
        allocate_dummy_regions();
  
!       _allocator->init_mutator_alloc_region();
  
        expand_heap_after_young_collection();
  
        double sample_end_time_sec = os::elapsedTime();
        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
--- 3066,3076 ----
          // the current thread has completed its logging output.
        }
  
        allocate_dummy_regions();
  
!       _allocator->init_mutator_alloc_regions();
  
        expand_heap_after_young_collection();
  
        double sample_end_time_sec = os::elapsedTime();
        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
*** 4546,4562 ****
  }
  
  // Methods for the mutator alloc region
  
  HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
!                                                       bool force) {
    assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    bool should_allocate = policy()->should_allocate_mutator_region();
    if (force || should_allocate) {
      HeapRegion* new_alloc_region = new_region(word_size,
                                                HeapRegionType::Eden,
!                                               false /* do_expand */);
      if (new_alloc_region != NULL) {
        set_region_short_lived_locked(new_alloc_region);
        _hr_printer.alloc(new_alloc_region, !should_allocate);
        _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
        _policy->remset_tracker()->update_at_allocate(new_alloc_region);
--- 4569,4587 ----
  }
  
  // Methods for the mutator alloc region
  
  HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
!                                                       bool force,
!                                                       uint node_index) {
    assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    bool should_allocate = policy()->should_allocate_mutator_region();
    if (force || should_allocate) {
      HeapRegion* new_alloc_region = new_region(word_size,
                                                HeapRegionType::Eden,
!                                               false /* do_expand */,
!                                               node_index);
      if (new_alloc_region != NULL) {
        set_region_short_lived_locked(new_alloc_region);
        _hr_printer.alloc(new_alloc_region, !should_allocate);
        _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
        _policy->remset_tracker()->update_at_allocate(new_alloc_region);