< prev index next >

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page
rev 7902 : imported patch 8073052-Rename-and-clean-up-the-allocation-manager-hierarchy-in-g1Allocator

*** 666,676 **** assert(hr == NULL || (hr->end() == new_end && hr->top() == new_top), "sanity"); check_bitmaps("Humongous Region Allocation", first_hr); assert(first_hr->used() == word_size * HeapWordSize, "invariant"); ! _allocator->increase_used(first_hr->used()); _humongous_set.add(first_hr); return new_obj; } --- 666,676 ---- assert(hr == NULL || (hr->end() == new_end && hr->top() == new_top), "sanity"); check_bitmaps("Humongous Region Allocation", first_hr); assert(first_hr->used() == word_size * HeapWordSize, "invariant"); ! increase_used(first_hr->used()); _humongous_set.add(first_hr); return new_obj; }
*** 1781,1790 **** --- 1781,1791 ---- _humongous_is_live(), _has_humongous_reclaim_candidates(false), _free_regions_coming(false), _young_list(new YoungList(this)), _gc_time_stamp(0), + _summary_bytes_used(0), _survivor_plab_stats(YoungPLABSize, PLABWeight), _old_plab_stats(OldPLABSize, PLABWeight), _expand_heap_after_alloc_failure(true), _surviving_young_words(NULL), _old_marking_cycles_started(0),
*** 2216,2230 **** } // Computes the sum of the storage used by the various regions. size_t G1CollectedHeap::used() const { ! return _allocator->used(); } size_t G1CollectedHeap::used_unlocked() const { ! return _allocator->used_unlocked(); } class SumUsedClosure: public HeapRegionClosure { size_t _used; public: --- 2217,2231 ---- } // Computes the sum of the storage used by the various regions. size_t G1CollectedHeap::used() const { ! return _summary_bytes_used + _allocator->used_in_alloc_regions(); } size_t G1CollectedHeap::used_unlocked() const { ! return _summary_bytes_used; } class SumUsedClosure: public HeapRegionClosure { size_t _used; public:
*** 3936,3956 **** _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { ! _allocator->set_used(recalculate_used()); uint n_queues = MAX2((int)ParallelGCThreads, 1); for (uint i = 0; i < n_queues; i++) { if (_evacuation_failed_info_array[i].has_failed()) { _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); } } } else { // The "used" of the the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. ! _allocator->increase_used(g1_policy()->bytes_copied_during_gc()); } if (g1_policy()->during_initial_mark_pause()) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the --- 3937,3957 ---- _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { ! set_used(recalculate_used()); uint n_queues = MAX2((int)ParallelGCThreads, 1); for (uint i = 0; i < n_queues; i++) { if (_evacuation_failed_info_array[i].has_failed()) { _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); } } } else { // The "used" of the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. ! increase_used(g1_policy()->bytes_copied_during_gc()); } if (g1_policy()->during_initial_mark_pause()) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the
*** 5779,5789 **** _hrm.insert_list_into_free_list(list); } } void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { ! _allocator->decrease_used(bytes); } class G1ParCleanupCTTask : public AbstractGangTask { G1SATBCardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; --- 5780,5790 ---- _hrm.insert_list_into_free_list(list); } } void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { ! decrease_used(bytes); } class G1ParCleanupCTTask : public AbstractGangTask { G1SATBCardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h;
*** 6495,6510 **** RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); heap_region_iterate(&cl); if (!free_list_only) { ! _allocator->set_used(cl.total_used()); } ! assert(_allocator->used_unlocked() == recalculate_used(), err_msg("inconsistent _allocator->used_unlocked(), " "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT, ! _allocator->used_unlocked(), recalculate_used())); } void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { _refine_cte_cl->set_concurrent(concurrent); } --- 6496,6511 ---- RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); heap_region_iterate(&cl); if (!free_list_only) { ! set_used(cl.total_used()); } ! assert(used_unlocked() == recalculate_used(), err_msg("inconsistent used_unlocked(), " "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT, ! used_unlocked(), recalculate_used())); } void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { _refine_cte_cl->set_concurrent(concurrent); }
*** 6540,6550 **** size_t allocated_bytes) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); ! _allocator->increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its // used space has been recored in _summary_bytes_used. g1mm()->update_eden_size(); --- 6541,6551 ---- size_t allocated_bytes) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); ! increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its // used space has been recorded in _summary_bytes_used. g1mm()->update_eden_size();
< prev index next >