
src/share/vm/gc/g1/g1CollectedHeap.cpp


*** 402,412 ****
  // Returns true if the reference points to an object that
  // can move in an incremental collection.
  bool G1CollectedHeap::is_scavengable(const void* p) {
    HeapRegion* hr = heap_region_containing(p);
!   return !hr->is_humongous();
  }
  
  // Private methods.
  
  HeapRegion*
--- 402,412 ----
  // Returns true if the reference points to an object that
  // can move in an incremental collection.
  bool G1CollectedHeap::is_scavengable(const void* p) {
    HeapRegion* hr = heap_region_containing(p);
!   return !hr->is_pinned();
  }
  
  // Private methods.
  
  HeapRegion*
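Note: "pinned" is the umbrella property for regions whose objects the collector must not move; with this change that covers humongous regions and the new archive regions. The predicate below is only a conceptual sketch for the reader; the real test lives in HeapRegion/HeapRegionType (not part of this file) and may be implemented with type tag bits rather than two calls.

  // Conceptual sketch only; the actual implementation is in HeapRegionType.
  bool HeapRegion::is_pinned() const {
    return is_humongous() || is_archive();
  }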
*** 905,914 ****
--- 905,1121 ----
      ShouldNotReachHere();
      return NULL;
    }
  
+ void G1CollectedHeap::begin_record_alloc_range() {
+   assert_at_safepoint(true /* should_be_vm_thread */);
+   if (_recording_allocator == NULL) {
+     _recording_allocator = G1RecordingAllocator::create_allocator(this);
+   }
+ }
+ 
+ bool G1CollectedHeap::is_record_alloc_too_large(size_t word_size) {
+   // Check whether the size would be considered humongous for a minimum-sized region.
+   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
+ }
+ 
+ HeapWord* G1CollectedHeap::record_mem_allocate(size_t word_size) {
+   assert_at_safepoint(true /* should_be_vm_thread */);
+   assert(_recording_allocator != NULL, "_recording_allocator not initialized");
+ 
+   // Return NULL if the size would be considered humongous for a minimum-sized region.
+   // Otherwise, attempt to perform the allocation in the recorded space.
+   if (is_record_alloc_too_large(word_size)) {
+     return NULL;
+   }
+   return _recording_allocator->record_mem_allocate(word_size);
+ }
+ 
+ void G1CollectedHeap::end_record_alloc_range(GrowableArray<MemRegion>* ranges,
+                                              uint end_alignment) {
+   assert_at_safepoint(true /* should_be_vm_thread */);
+   assert(_recording_allocator != NULL, "recording allocator uninitialized");
+ 
+   // Call complete_recording to do the real work, filling in the MemRegion
+   // array with the recorded regions.
+   _recording_allocator->complete_recording(ranges, end_alignment);
+ }
+ 
+ void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size) {
+   // Create filler objects for the specified range, being careful not to
+   // create any humongous objects.
+   if (!is_humongous(word_size)) {
+     CollectedHeap::fill_with_object(base_address, word_size);
+   } else {
+     size_t remainder = word_size;
+     size_t increment = humongous_threshold_for(HeapRegion::GrainWords) / 2;
+     HeapWord* fill_top = base_address;
+     // Don't let remainder get smaller than the minimum filler object size.
+     while ((remainder > increment) && (remainder - increment >= min_fill_size())) {
+       CollectedHeap::fill_with_object(fill_top, increment);
+       fill_top += increment;
+       remainder -= increment;
+     }
+     if (remainder != 0) {
+       CollectedHeap::fill_with_object(fill_top, remainder);
+     }
+   }
+ }
+ 
+ bool
+ G1CollectedHeap::check_archive_addresses(MemRegion* ranges, uint count) {
+   MemRegion mr = _hrm.reserved();
+   for (uint i = 0; i < count; i++) {
+     if (!mr.contains(ranges[i].start()) || !mr.contains(ranges[i].last())) {
+       return false;
+     }
+   }
+   return true;
+ }
+ 
+ bool
+ G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, uint count) {
+   MutexLockerEx x(Heap_lock);
+ 
+   MemRegion mr = _hrm.reserved();
+   HeapWord* prev_end_addr = NULL;
+   uint prev_end_index = 0;
+ 
+   // Temporarily disable pretouching of heap pages. This interface is used
+   // when mmap'ing archived heap data in, so pre-touching is wasted.
+   FlagSetting fs(AlwaysPreTouch, false);
+ 
+   // Enable archive object checking in G1MarkSweep. We have to let it know
+   // about each archive range, so that objects in those ranges aren't marked.
+   G1MarkSweep::enable_archive_object_check();
+ 
+   // For each specified MemRegion range, allocate the corresponding G1
+   // regions and mark them as archive regions.
+   // We expect the ranges in ascending order, without overlap.
+   for (uint i = 0; i < count; i++) {
+     HeapWord* base_address = ranges[i].start();
+     size_t word_size = ranges[i].word_size();
+     HeapWord* end_address = ranges[i].last();
+ 
+     assert((base_address > prev_end_addr) && (base_address < end_address),
+            "invalid range specification");
+ 
+     prev_end_addr = end_address;
+     uint start_index = _hrm.addr_to_index(base_address);
+     uint end_index = _hrm.addr_to_index(end_address);
+ 
+     // Check for ranges that begin/end in the same G1 region
+     // as the previous range.
+     if (start_index == prev_end_index) {
+       if (end_index == prev_end_index) {
+         break;
+       }
+       start_index++;
+     }
+     prev_end_index = end_index;
+ 
+     // Ensure that each contained G1 region is available and free,
+     // returning false if not.
+     for (uint curr_index = start_index; curr_index <= end_index; curr_index++) {
+       HeapRegion* curr_region;
+       if ((curr_region = _hrm.at_or_null(curr_index)) == NULL) {
+         ergo_verbose1(ErgoHeapSizing,
+                       "attempt heap expansion",
+                       ergo_format_reason("pinning region")
+                       ergo_format_byte("region size"),
+                       HeapRegion::GrainWords * HeapWordSize);
+         _hrm.expand_at(curr_index, 1);
+       } else {
+         if (!curr_region->is_free()) {
+           return false;
+         }
+       }
+     }
+ 
+     _hrm.allocate_free_regions_starting_at(start_index, (end_index - start_index) + 1);
+     _allocator->increase_used(word_size * HeapWordSize);
+ 
+     // Mark each G1 region touched by the range as archive, add it to the old set, and set
+     // the allocation context and top.
+     for (uint i = start_index; i <= end_index; i++) {
+       HeapRegion* curr = region_at(i);
+       assert(curr->is_empty() && !curr->is_pinned(), "Invalid MemRegion");
+       _hr_printer.alloc(curr, G1HRPrinter::Archive);
+       curr->set_allocation_context(AllocationContext::system());
+       if (i != end_index) {
+         curr->set_top(curr->end());
+       } else {
+         curr->set_top(end_address + 1);
+       }
+       curr->set_archive();
+       _old_set.add(curr);
+     }
+ 
+     // Notify mark-sweep of the archive range.
+     G1MarkSweep::mark_range_archive(base_address, end_address);
+   }
+   return true;
+ }
+ 
+ void
+ G1CollectedHeap::fill_archive_regions(MemRegion* ranges, uint count) {
+ 
+   MemRegion mr = _hrm.reserved();
+   HeapWord *prev_end_addr = NULL;
+   uint prev_end_index = 0;
+ 
+   // For each MemRegion, create filler objects, if needed, in the G1 regions
+   // that contain the address range. The address range actually within the
+   // MemRegion will not be modified. That is assumed to have been initialized
+   // elsewhere, probably via an mmap of archived heap data.
+   MutexLockerEx x(Heap_lock);
+   for (uint i = 0; i < count; i++) {
+     HeapWord* base_address = ranges[i].start();
+     size_t word_size = ranges[i].word_size();
+     HeapWord* end_address = ranges[i].last();
+ 
+     assert(mr.contains(base_address) && mr.contains(end_address),
+            "MemRegion outside of heap");
+ 
+     uint start_index = _hrm.addr_to_index(base_address);
+     uint end_index = _hrm.addr_to_index(end_address);
+     HeapRegion* start_region = _hrm.addr_to_region(base_address);
+     HeapRegion* end_region = _hrm.addr_to_region(end_address);
+     HeapWord* bottom_address = start_region->bottom();
+ 
+     // Check for a range beginning in the same region in which the
+     // previous one ended.
+     if (start_index == prev_end_index) {
+       bottom_address = prev_end_addr;
+       start_index++;
+     }
+ 
+ #ifdef ASSERT
+     // Verify the regions were all marked as archive regions by
+     // alloc_archive_regions.
+     for (uint i = start_index; i <= end_index; i++) {
+       HeapRegion* curr = region_at(i);
+       assert(curr->is_archive(), "Invalid range in fill_archive_regions");
+     }
+ #endif
+ 
+     prev_end_addr = base_address + word_size;
+     prev_end_index = end_index;
+ 
+     // Fill the low part of the first allocated region with dummy object(s),
+     // if the region base does not match the range address, or if the previous
+     // range ended within the same G1 region, and there is a gap.
+     if (base_address != bottom_address) {
+       size_t fill_size = base_address - bottom_address;
+       G1CollectedHeap::fill_with_non_humongous_objects(bottom_address, fill_size);
+       _allocator->increase_used(fill_size * HeapWordSize);
+     }
+   }
+ }
+ 
  HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                          uint* gc_count_before_ret,
                                                          uint* gclocker_retry_count_ret) {
    // The structure of this method has a lot of similarities to
    // attempt_allocation_slow(). The reason these two were not merged
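For context, the new methods above appear intended to be driven in a fixed order by whatever code maps archived heap data into the Java heap: validate the candidate address ranges, pin the covering regions as archive regions, populate the ranges (for example via mmap), then pad the uncovered parts of those regions with fillers. The caller and helper name below are hypothetical; only the three G1CollectedHeap calls come from this patch.

  #include "gc/g1/g1CollectedHeap.hpp"
  #include "memory/memRegion.hpp"

  // Hypothetical caller; sketches the expected call sequence only.
  static bool map_archive_ranges_into_heap(MemRegion* ranges, uint count) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // Reject ranges that lie outside the reserved heap.
    if (!g1h->check_archive_addresses(ranges, count)) {
      return false;
    }
    // Pin the covering regions as archive regions; fails if any is in use.
    if (!g1h->alloc_archive_regions(ranges, count)) {
      return false;
    }
    // ... map or copy the archived heap data into 'ranges' here ...

    // Pad the unused space below each range with filler objects so the
    // containing regions stay parseable.
    g1h->fill_archive_regions(ranges, count);
    return true;
  }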
*** 1129,1138 ****
--- 1336,1347 ----
        } else {
          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
        }
      } else if (hr->is_continues_humongous()) {
        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+     } else if (hr->is_archive()) {
+       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
      } else if (hr->is_old()) {
        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
      } else {
        ShouldNotReachHere();
      }
*** 1722,1731 ****
--- 1931,1941 ----
    _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
    _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
    _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
    _humongous_reclaim_candidates(),
    _has_humongous_reclaim_candidates(false),
+   _recording_allocator(NULL),
    _free_regions_coming(false),
    _young_list(new YoungList(this)),
    _gc_time_stamp(0),
    _survivor_plab_stats(YoungPLABSize, PLABWeight),
    _old_plab_stats(OldPLABSize, PLABWeight),
*** 1748,1758 ****
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  
    _allocator = G1Allocator::create_allocator(this);
!   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  
    int n_queues = (int)ParallelGCThreads;
    _task_queues = new RefToScanQueueSet(n_queues);
  
    uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
--- 1958,1968 ----
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  
    _allocator = G1Allocator::create_allocator(this);
!   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
  
    int n_queues = (int)ParallelGCThreads;
    _task_queues = new RefToScanQueueSet(n_queues);
  
    uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
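The point of replacing the hard-coded "GrainWords / 2" is that the same half-a-region rule can now be applied to region sizes other than the current one, as is_record_alloc_too_large does above with HeapRegion::min_region_size_in_words(). The helper itself is declared in g1CollectedHeap.hpp, which is not part of this file; the sketch below shows its presumed shape only to make the arithmetic explicit.

  // Presumed definition (in g1CollectedHeap.hpp); an allocation is humongous
  // once it is at least half the given region size.
  static size_t humongous_threshold_for(size_t region_size) {
    return (region_size / 2);
  }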
*** 2163,2173 ****
  }
  
  // Computes the sum of the storage used by the various regions.
  size_t G1CollectedHeap::used() const {
!   return _allocator->used();
  }
  
  size_t G1CollectedHeap::used_unlocked() const {
    return _allocator->used_unlocked();
  }
--- 2373,2387 ----
  }
  
  // Computes the sum of the storage used by the various regions.
  size_t G1CollectedHeap::used() const {
!   size_t result = _allocator->used();
!   if (_recording_allocator != NULL) {
!     result += _recording_allocator->used();
!   }
!   return result;
  }
  
  size_t G1CollectedHeap::used_unlocked() const {
    return _allocator->used_unlocked();
  }
*** 2591,2601 ****
    }
  }
  
  HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
    HeapRegion* result = _hrm.next_region_in_heap(from);
!   while (result != NULL && result->is_humongous()) {
      result = _hrm.next_region_in_heap(result);
    }
    return result;
  }
--- 2805,2815 ----
    }
  }
  
  HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
    HeapRegion* result = _hrm.next_region_in_heap(from);
!   while (result != NULL && result->is_pinned()) {
      result = _hrm.next_region_in_heap(result);
    }
    return result;
  }
*** 2899,2908 ****
--- 3113,3148 ----
      }
    }
    size_t live_bytes() { return _live_bytes; }
  };
  
+ class VerifyArchiveOopClosure: public OopClosure {
+ public:
+   VerifyArchiveOopClosure(HeapRegion *hr) { }
+   void do_oop(narrowOop *p) { do_oop_work(p); }
+   void do_oop(      oop *p) { do_oop_work(p); }
+ 
+   template <class T> void do_oop_work(T *p) {
+     oop obj = oopDesc::load_decode_heap_oop(p);
+     guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
+               "Archive object references a non-pinned object");
+   }
+ };
+ 
+ class VerifyArchiveRegionClosure: public ObjectClosure {
+ public:
+   VerifyArchiveRegionClosure(HeapRegion *hr) { }
+   // Verify that all object pointers are to pinned regions.
+   void do_object(oop o) {
+     VerifyArchiveOopClosure checkOop(NULL);
+     assert(o != NULL, "Should not be here for NULL oops");
+     o->oop_iterate_no_header(&checkOop);
+   }
+ };
+ 
+ 
  class VerifyRegionClosure: public HeapRegionClosure {
  private:
    bool _par;
    VerifyOption _vo;
    bool _failures;
*** 2918,2927 ****
--- 3158,3174 ----
    bool failures() {
      return _failures;
    }
  
    bool doHeapRegion(HeapRegion* r) {
+     // For archive regions, verify there are no heap pointers to
+     // non-pinned regions. For all others, verify liveness info.
+     if (r->is_archive()) {
+       VerifyArchiveRegionClosure verify_oop_pointers(r);
+       r->object_iterate(&verify_oop_pointers);
+       return true;
+     }
      if (!r->is_continues_humongous()) {
        bool failures = false;
        r->verify(_vo, &failures);
        if (failures) {
          _failures = true;
*** 3102,3123 ****
                                         const HeapRegion* hr,
                                         const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
!   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }
  
  bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                         const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
!   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }
--- 3349,3373 ----
                                         const HeapRegion* hr,
                                         const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
!   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }
  
  bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                         const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
!   case VerifyOption_G1UseMarkWord: {
!     HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
!     return !obj->is_gc_marked() && !hr->is_archive();
!   }
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }
*** 3146,3156 ****
    // Print the per-region information.
    st->cr();
    st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                 "HS=humongous(starts), HC=humongous(continues), "
!                "CS=collection set, F=free, TS=gc time stamp, "
                 "PTAMS=previous top-at-mark-start, "
                 "NTAMS=next top-at-mark-start)");
    PrintRegionClosure blk(st);
    heap_region_iterate(&blk);
  }
--- 3396,3406 ----
    // Print the per-region information.
    st->cr();
    st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                 "HS=humongous(starts), HC=humongous(continues), "
!                "CS=collection set, F=free, A=archive, TS=gc time stamp, "
                 "PTAMS=previous top-at-mark-start, "
                 "NTAMS=next top-at-mark-start)");
    PrintRegionClosure blk(st);
    heap_region_iterate(&blk);
  }
*** 3845,3854 ****
--- 4095,4107 ----
        _young_list->reset_auxilary_lists();
  
        if (evacuation_failed()) {
          _allocator->set_used(recalculate_used());
+         if (_recording_allocator != NULL) {
+           _recording_allocator->clear_used();
+         }
          for (uint i = 0; i < ParallelGCThreads; i++) {
            if (_evacuation_failed_info_array[i].has_failed()) {
              _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
            }
          }
*** 6108,6118 ****
    } else {
      // We ignore free regions, we'll empty the free list afterwards.
      // We ignore young regions, we'll empty the young list afterwards.
      // We ignore humongous regions, we're not tearing down the
      // humongous regions set.
!     assert(r->is_free() || r->is_young() || r->is_humongous(),
             "it cannot be another type");
    }
    return false;
  }
--- 6361,6372 ----
    } else {
      // We ignore free regions, we'll empty the free list afterwards.
      // We ignore young regions, we'll empty the young list afterwards.
      // We ignore humongous regions, we're not tearing down the
      // humongous regions set.
!     // We ignore archive regions.
!     assert(r->is_free() || r->is_young() || r->is_humongous() || r->is_archive(),
             "it cannot be another type");
    }
    return false;
  }
*** 6166,6182 ****
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(!r->is_young(), "we should not come across young regions");
  
      if (r->is_humongous()) {
!       // We ignore humongous regions, we left the humongous set unchanged
      } else {
        // Objects that were compacted would have ended up on regions
!       // that were previously old or free.
        assert(r->is_free() || r->is_old(), "invariant");
!       // We now consider them old, so register as such.
        r->set_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }
--- 6420,6442 ----
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(!r->is_young(), "we should not come across young regions");
  
      if (r->is_humongous()) {
!       // We ignore humongous regions.
!       // We left the humongous set unchanged.
      } else {
        // Objects that were compacted would have ended up on regions
!       // that were previously old or free. Archive regions (which are
!       // old) will not have been touched.
        assert(r->is_free() || r->is_old(), "invariant");
!       // We now consider them old, so register as such. Leave
!       // archive regions set that way, however, while still adding
!       // them to the old set.
!       if (!r->is_archive()) {
          r->set_old();
+       }
        _old_set->add(r);
      }
      _total_used += r->used();
    }
*** 6198,6207 ****
--- 6458,6470 ----
    RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
    heap_region_iterate(&cl);
  
    if (!free_list_only) {
      _allocator->set_used(cl.total_used());
+     if (_recording_allocator != NULL) {
+       _recording_allocator->clear_used();
+     }
    }
    assert(_allocator->used_unlocked() == recalculate_used(),
           err_msg("inconsistent _allocator->used_unlocked(), "
                   "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
                   _allocator->used_unlocked(), recalculate_used()));
*** 6298,6307 ****
--- 6561,6589 ----
      _old_set.add(alloc_region);
    }
    _hr_printer.retire(alloc_region);
  }
  
+ HeapRegion* G1CollectedHeap::alloc_highest_available_region() {
+   bool expanded = false;
+   uint index = _hrm.find_highest_available(&expanded);
+ 
+   if (index != G1_NO_HRM_INDEX) {
+     if (expanded) {
+       ergo_verbose1(ErgoHeapSizing,
+                     "attempt heap expansion",
+                     ergo_format_reason("requested address range outside heap bounds")
+                     ergo_format_byte("region size"),
+                     HeapRegion::GrainWords * HeapWordSize);
+     }
+     _hrm.allocate_free_regions_starting_at(index, 1);
+     return region_at(index);
+   }
+   return NULL;
+ }
+ 
  // Heap region set verification
  
  class VerifyRegionListsClosure : public HeapRegionClosure {
  private:
    HeapRegionSet*   _old_set;
*** 6334,6343 ****
--- 6616,6626 ----
      _free_count.increment(1u, hr->capacity());
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set,
             err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
      _old_count.increment(1u, hr->capacity());
    } else {
+     assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
      ShouldNotReachHere();
    }
    return false;
  }