< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 8688 : 8131319: Move G1Allocator::_summary_bytes_used back to G1CollectedHeap
Reviewed-by:

*** 630,640 **** assert(hr == NULL || (hr->end() == new_end && hr->top() == new_top), "sanity"); check_bitmaps("Humongous Region Allocation", first_hr); assert(first_hr->used() == word_size * HeapWordSize, "invariant"); ! _allocator->increase_used(first_hr->used()); _humongous_set.add(first_hr); return new_obj; } --- 630,640 ---- assert(hr == NULL || (hr->end() == new_end && hr->top() == new_top), "sanity"); check_bitmaps("Humongous Region Allocation", first_hr); assert(first_hr->used() == word_size * HeapWordSize, "invariant"); ! increase_used(first_hr->used()); _humongous_set.add(first_hr); return new_obj; }
*** 996,1006 **** // region, skip it, just adjusting the recorded top. HeapRegion* start_region = _hrm.addr_to_region(start_address); if ((prev_last_region != NULL) && (start_region == prev_last_region)) { start_address = start_region->end(); if (start_address > last_address) { ! _allocator->increase_used(word_size * HeapWordSize); start_region->set_top(last_address + 1); continue; } start_region->set_top(start_address); curr_range = MemRegion(start_address, last_address + 1); --- 996,1006 ---- // region, skip it, just adjusting the recorded top. HeapRegion* start_region = _hrm.addr_to_region(start_address); if ((prev_last_region != NULL) && (start_region == prev_last_region)) { start_address = start_region->end(); if (start_address > last_address) { ! increase_used(word_size * HeapWordSize); start_region->set_top(last_address + 1); continue; } start_region->set_top(start_address); curr_range = MemRegion(start_address, last_address + 1);
*** 1010,1020 **** // Perform the actual region allocation, exiting if it fails. // Then note how much new space we have allocated. if (!_hrm.allocate_containing_regions(curr_range, &commits)) { return false; } ! _allocator->increase_used(word_size * HeapWordSize); if (commits != 0) { ergo_verbose1(ErgoHeapSizing, "attempt heap expansion", ergo_format_reason("allocate archive regions") ergo_format_byte("total size"), --- 1010,1020 ---- // Perform the actual region allocation, exiting if it fails. // Then note how much new space we have allocated. if (!_hrm.allocate_containing_regions(curr_range, &commits)) { return false; } ! increase_used(word_size * HeapWordSize); if (commits != 0) { ergo_verbose1(ErgoHeapSizing, "attempt heap expansion", ergo_format_reason("allocate archive regions") ergo_format_byte("total size"),
*** 1102,1112 **** // if the region bottom does not match the range start, or if the previous // range ended within the same G1 region, and there is a gap. if (start_address != bottom_address) { size_t fill_size = pointer_delta(start_address, bottom_address); G1CollectedHeap::fill_with_objects(bottom_address, fill_size); ! _allocator->increase_used(fill_size * HeapWordSize); } } } HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, --- 1102,1112 ---- // if the region bottom does not match the range start, or if the previous // range ended within the same G1 region, and there is a gap. if (start_address != bottom_address) { size_t fill_size = pointer_delta(start_address, bottom_address); G1CollectedHeap::fill_with_objects(bottom_address, fill_size); ! increase_used(fill_size * HeapWordSize); } } } HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
*** 1928,1937 **** --- 1928,1938 ---- _has_humongous_reclaim_candidates(false), _archive_allocator(NULL), _free_regions_coming(false), _young_list(new YoungList(this)), _gc_time_stamp(0), + _summary_bytes_used(0), _survivor_plab_stats(YoungPLABSize, PLABWeight), _old_plab_stats(OldPLABSize, PLABWeight), _expand_heap_after_alloc_failure(true), _surviving_young_words(NULL), _old_marking_cycles_started(0),
*** 2369,2387 **** } // Computes the sum of the storage used by the various regions. size_t G1CollectedHeap::used() const { ! size_t result = _allocator->used(); if (_archive_allocator != NULL) { result += _archive_allocator->used(); } return result; } size_t G1CollectedHeap::used_unlocked() const { ! return _allocator->used_unlocked(); } class SumUsedClosure: public HeapRegionClosure { size_t _used; public: --- 2370,2388 ---- } // Computes the sum of the storage used by the various regions. size_t G1CollectedHeap::used() const { ! size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions(); if (_archive_allocator != NULL) { result += _archive_allocator->used(); } return result; } size_t G1CollectedHeap::used_unlocked() const { ! return _summary_bytes_used; } class SumUsedClosure: public HeapRegionClosure { size_t _used; public:
*** 4100,4110 **** _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { ! _allocator->set_used(recalculate_used()); if (_archive_allocator != NULL) { _archive_allocator->clear_used(); } for (uint i = 0; i < ParallelGCThreads; i++) { if (_evacuation_failed_info_array[i].has_failed()) { --- 4101,4111 ---- _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { ! set_used(recalculate_used()); if (_archive_allocator != NULL) { _archive_allocator->clear_used(); } for (uint i = 0; i < ParallelGCThreads; i++) { if (_evacuation_failed_info_array[i].has_failed()) {
*** 4112,4122 **** } } } else { // The "used" of the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. ! _allocator->increase_used(g1_policy()->bytes_copied_during_gc()); } if (collector_state()->during_initial_mark_pause()) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the --- 4113,4123 ---- } } } else { // The "used" of the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. ! increase_used(g1_policy()->bytes_copied_during_gc()); } if (collector_state()->during_initial_mark_pause()) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the
*** 5743,5753 **** _hrm.insert_list_into_free_list(list); } } void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { ! _allocator->decrease_used(bytes); } class G1ParCleanupCTTask : public AbstractGangTask { G1SATBCardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; --- 5744,5754 ---- _hrm.insert_list_into_free_list(list); } } void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { ! decrease_used(bytes); } class G1ParCleanupCTTask : public AbstractGangTask { G1SATBCardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h;
*** 6393,6402 **** --- 6394,6418 ---- // a collected region was young or old when the full GC was initiated. } _hrm.remove_all_free_regions(); } + void G1CollectedHeap::increase_used(size_t bytes) { + _summary_bytes_used += bytes; + } + + void G1CollectedHeap::decrease_used(size_t bytes) { + assert(_summary_bytes_used >= bytes, + err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT, + _summary_bytes_used, bytes)); + _summary_bytes_used -= bytes; + } + + void G1CollectedHeap::set_used(size_t bytes) { + _summary_bytes_used = bytes; + } + class RebuildRegionSetsClosure : public HeapRegionClosure { private: bool _free_list_only; HeapRegionSet* _old_set; HeapRegionManager* _hrm;
*** 6461,6479 **** RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); heap_region_iterate(&cl); if (!free_list_only) { ! _allocator->set_used(cl.total_used()); if (_archive_allocator != NULL) { _archive_allocator->clear_used(); } } ! assert(_allocator->used_unlocked() == recalculate_used(), ! err_msg("inconsistent _allocator->used_unlocked(), " "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT, ! _allocator->used_unlocked(), recalculate_used())); } void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { _refine_cte_cl->set_concurrent(concurrent); } --- 6477,6495 ---- RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); heap_region_iterate(&cl); if (!free_list_only) { ! set_used(cl.total_used()); if (_archive_allocator != NULL) { _archive_allocator->clear_used(); } } ! assert(used_unlocked() == recalculate_used(), ! err_msg("inconsistent used_unlocked(), " "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT, ! used_unlocked(), recalculate_used())); } void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { _refine_cte_cl->set_concurrent(concurrent); }
*** 6509,6519 **** size_t allocated_bytes) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); ! _allocator->increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its // used space has been recorded in _summary_bytes_used. g1mm()->update_eden_size(); --- 6525,6535 ---- size_t allocated_bytes) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); ! increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its // used space has been recorded in _summary_bytes_used. g1mm()->update_eden_size(); 
< prev index next >