# HG changeset patch # User david # Date 1446647674 -3600 # Wed Nov 04 15:34:34 2015 +0100 # Node ID b8f8bc8ce70b7b19d9b58e730212723f39fd483e # Parent 923bde1cd5745e39dbd9a527e10800f5c8f9cd91 imported patch humongous diff --git a/src/share/vm/gc/g1/concurrentMark.cpp b/src/share/vm/gc/g1/concurrentMark.cpp --- a/src/share/vm/gc/g1/concurrentMark.cpp +++ b/src/share/vm/gc/g1/concurrentMark.cpp @@ -806,12 +806,7 @@ // This closure can be called concurrently to the mutator, so we must make sure // that the result of the getNextMarkedWordAddress() call is compared to the // value passed to it as limit to detect any found bits. - // We can use the region's orig_end() for the limit and the comparison value - // as it always contains the "real" end of the region that never changes and - // has no side effects. - // Due to the latter, there can also be no problem with the compiler generating - // reloads of the orig_end() call. - HeapWord* end = r->orig_end(); + HeapWord* end = r->end(); return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end; } }; @@ -825,9 +820,7 @@ class NoteStartOfMarkHRClosure: public HeapRegionClosure { public: bool doHeapRegion(HeapRegion* r) { - if (!r->is_continues_humongous()) { - r->note_start_of_marking(); - } + r->note_start_of_marking(); return false; } }; @@ -1286,22 +1279,10 @@ // Takes a region that's not empty (i.e., it has at least one // live object in it and sets its corresponding bit on the region - // bitmap to 1. If the region is "starts humongous" it will also set - // to 1 the bits on the region bitmap that correspond to its - // associated "continues humongous" regions. + // bitmap to 1. void set_bit_for_region(HeapRegion* hr) { - assert(!hr->is_continues_humongous(), "should have filtered those out"); - BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); - if (!hr->is_starts_humongous()) { - // Normal (non-humongous) case: just set the bit. - _region_bm->par_at_put(index, true); - } else { - // Starts humongous case: calculate how many regions are part of - // this humongous region and then set the bit range. - BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index(); - _region_bm->par_at_put_range(index, end_index, true); - } + _region_bm->par_at_put((BitMap::idx_t) hr->hrm_index(), true); } public: @@ -1325,18 +1306,6 @@ _bm(bm), _region_marked_bytes(0) { } bool doHeapRegion(HeapRegion* hr) { - - if (hr->is_continues_humongous()) { - // We will ignore these here and process them when their - // associated "starts humongous" region is processed (see - // set_bit_for_heap_region()). Note that we cannot rely on their - // associated "starts humongous" region to have their bit set to - // 1 since, due to the region chunking in the parallel region - // iteration, a "continues humongous" region might be visited - // before its associated "starts humongous". - return false; - } - HeapWord* ntams = hr->next_top_at_mark_start(); HeapWord* start = hr->bottom(); @@ -1374,6 +1343,9 @@ // Add the size of this object to the number of marked bytes. marked_bytes += (size_t)obj_sz * HeapWordSize; + if (obj_end > hr->end()) { + break; + } // Find the next marked object after this one. start = _bm->getNextMarkedWordAddress(obj_end, ntams); } @@ -1446,17 +1418,6 @@ int failures() const { return _failures; } bool doHeapRegion(HeapRegion* hr) { - if (hr->is_continues_humongous()) { - // We will ignore these here and process them when their - // associated "starts humongous" region is processed (see - // set_bit_for_heap_region()).
Note that we cannot rely on their - // associated "starts humongous" region to have their bit set to - // 1 since, due to the region chunking in the parallel region - // iteration, a "continues humongous" region might be visited - // before its associated "starts humongous". - return false; - } - int failures = 0; // Call the CalcLiveObjectsClosure to walk the marking bitmap for @@ -1573,18 +1534,6 @@ CMCountDataClosureBase(g1h, region_bm, card_bm) { } bool doHeapRegion(HeapRegion* hr) { - - if (hr->is_continues_humongous()) { - // We will ignore these here and process them when their - // associated "starts humongous" region is processed (see - // set_bit_for_heap_region()). Note that we cannot rely on their - // associated "starts humongous" region to have their bit set to - // 1 since, due to the region chunking in the parallel region - // iteration, a "continues humongous" region might be visited - // before its associated "starts humongous". - return false; - } - HeapWord* ntams = hr->next_top_at_mark_start(); HeapWord* top = hr->top(); @@ -1681,7 +1630,7 @@ const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } bool doHeapRegion(HeapRegion *hr) { - if (hr->is_continues_humongous() || hr->is_archive()) { + if (hr->is_archive()) { return false; } // We use a claim value of zero here because all regions @@ -1693,7 +1642,6 @@ _freed_bytes += hr->used(); hr->set_containing_set(NULL); if (hr->is_humongous()) { - assert(hr->is_starts_humongous(), "we should only see starts humongous"); _humongous_regions_removed.increment(1u, hr->capacity()); _g1->free_humongous_region(hr, _local_cleanup_list, true); } else { @@ -2342,7 +2290,7 @@ // circumspect about treating the argument as an object. void do_entry(void* entry) const { _task->increment_refs_reached(); - HeapRegion* hr = _g1h->heap_region_containing_raw(entry); + HeapRegion* hr = _g1h->heap_region_containing(entry); if (entry < hr->next_top_at_mark_start()) { // Until we get here, we don't know whether entry refers to a valid // object; it could instead have been a stale reference. @@ -2515,9 +2463,9 @@ // claim_region() and a humongous object allocation might force us // to do a bit of unnecessary work (due to some unnecessary bitmap // iterations) but it should not introduce and correctness issues. - HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); - - // Above heap_region_containing_raw may return NULL as we always scan claim + HeapRegion* curr_region = _g1h->heap_region_containing(finger); + + // Above heap_region_containing may return NULL as we always scan claim // until the end of the heap. In this case, just jump to the next region. HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; @@ -2594,7 +2542,7 @@ HeapWord* global_finger = finger(); if (global_finger != NULL && global_finger < _heap_end) { // The global finger always points to a heap region boundary. We - // use heap_region_containing_raw() to get the containing region + // use heap_region_containing() to get the containing region // given that the global finger could be pointing to a free region // which subsequently becomes continues humongous. If that // happens, heap_region_containing() will return the bottom of the @@ -2602,7 +2550,7 @@ // not hold any more. // Since we always iterate over all regions, we might get a NULL HeapRegion // here. 
- HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); + HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); guarantee(global_hr == NULL || global_finger == global_hr->bottom(), "global finger: " PTR_FORMAT " region: " HR_FORMAT, p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); @@ -2615,7 +2563,7 @@ HeapWord* task_finger = task->finger(); if (task_finger != NULL && task_finger < _heap_end) { // See above note on the global finger verification. - HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); + HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); guarantee(task_hr == NULL || task_finger == task_hr->bottom() || !task_hr->in_collection_set(), "task finger: " PTR_FORMAT " region: " HR_FORMAT, @@ -2643,17 +2591,6 @@ _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } bool doHeapRegion(HeapRegion* hr) { - if (hr->is_continues_humongous()) { - // We will ignore these here and process them when their - // associated "starts humongous" region is processed. - // Note that we cannot rely on their associated - // "starts humongous" region to have their bit set to 1 - // since, due to the region chunking in the parallel region - // iteration, a "continues humongous" region might be visited - // before its associated "starts humongous". - return false; - } - HeapWord* start = hr->bottom(); HeapWord* limit = hr->next_top_at_mark_start(); HeapWord* end = hr->end(); @@ -2961,8 +2898,6 @@ void CMTask::setup_for_region(HeapRegion* hr) { assert(hr != NULL, "claim_region() should have filtered out NULL regions"); - assert(!hr->is_continues_humongous(), - "claim_region() should have filtered out continues humongous regions"); _curr_region = hr; _finger = hr->bottom(); update_region_limit(); diff --git a/src/share/vm/gc/g1/concurrentMark.hpp b/src/share/vm/gc/g1/concurrentMark.hpp --- a/src/share/vm/gc/g1/concurrentMark.hpp +++ b/src/share/vm/gc/g1/concurrentMark.hpp @@ -772,16 +772,13 @@ size_t* marked_bytes_array, BitMap* task_card_bm); - // Counts the given memory region in the task/worker counting - // data structures for the given worker id. - inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id); - // Counts the given object in the given task/worker counting // data structures. inline void count_object(oop obj, HeapRegion* hr, size_t* marked_bytes_array, - BitMap* task_card_bm); + BitMap* task_card_bm, + size_t word_size); // Attempts to mark the given object and, if successful, counts // the object in the given task/worker counting structures. diff --git a/src/share/vm/gc/g1/concurrentMark.inline.hpp b/src/share/vm/gc/g1/concurrentMark.inline.hpp --- a/src/share/vm/gc/g1/concurrentMark.inline.hpp +++ b/src/share/vm/gc/g1/concurrentMark.inline.hpp @@ -89,9 +89,7 @@ size_t region_size_bytes = mr.byte_size(); uint index = hr->hrm_index(); - assert(!hr->is_continues_humongous(), "should not be HC region"); assert(hr == g1h->heap_region_containing(start), "sanity"); - assert(hr == g1h->heap_region_containing(mr.last()), "sanity"); assert(marked_bytes_array != NULL, "pre-condition"); assert(task_card_bm != NULL, "pre-condition"); @@ -116,23 +114,27 @@ set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */); } -// Counts the given memory region in the task/worker counting -// data structures for the given worker id. 
-inline void ConcurrentMark::count_region(MemRegion mr, - HeapRegion* hr, - uint worker_id) { - size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id); - BitMap* task_card_bm = count_card_bitmap_for(worker_id); - count_region(mr, hr, marked_bytes_array, task_card_bm); -} - // Counts the given object in the given task/worker counting data structures. inline void ConcurrentMark::count_object(oop obj, HeapRegion* hr, size_t* marked_bytes_array, - BitMap* task_card_bm) { - MemRegion mr((HeapWord*)obj, obj->size()); - count_region(mr, hr, marked_bytes_array, task_card_bm); + BitMap* task_card_bm, + size_t word_size) { + assert(!hr->is_continues_humongous(), "Cannot enter count_object with continues humongous"); + if (!hr->is_starts_humongous()) { + MemRegion mr((HeapWord*)obj, word_size); + count_region(mr, hr, marked_bytes_array, task_card_bm); + } else { + uint index = hr->hrm_index(); + do { + MemRegion mr(hr->bottom(), hr->top()); + count_region(mr, hr, marked_bytes_array, task_card_bm); + if (++index >= _g1h->num_regions()) { + break; + } + hr = _g1h->region_at(index); + } while (hr->is_continues_humongous()); + } } // Attempts to mark the given object and, if successful, counts @@ -141,10 +143,9 @@ HeapRegion* hr, size_t* marked_bytes_array, BitMap* task_card_bm) { - HeapWord* addr = (HeapWord*)obj; - if (_nextMarkBitMap->parMark(addr)) { + if (_nextMarkBitMap->parMark((HeapWord*)obj)) { // Update the task specific count data for the object. - count_object(obj, hr, marked_bytes_array, task_card_bm); + count_object(obj, hr, marked_bytes_array, task_card_bm, obj->size()); return true; } return false; @@ -157,10 +158,10 @@ size_t word_size, HeapRegion* hr, uint worker_id) { - HeapWord* addr = (HeapWord*)obj; - if (_nextMarkBitMap->parMark(addr)) { - MemRegion mr(addr, word_size); - count_region(mr, hr, worker_id); + if (_nextMarkBitMap->parMark((HeapWord*)obj)) { + size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id); + BitMap* task_card_bm = count_card_bitmap_for(worker_id); + count_object(obj, hr, marked_bytes_array, task_card_bm, word_size); return true; } return false; @@ -351,7 +352,7 @@ // Only get the containing region if the object is not marked on the // bitmap (otherwise, it's a waste of time since we won't do // anything with it). - HeapRegion* hr = _g1h->heap_region_containing_raw(obj); + HeapRegion* hr = _g1h->heap_region_containing(obj); if (!hr->obj_allocated_since_next_marking(obj)) { make_reference_grey(obj, hr); } @@ -371,7 +372,7 @@ assert(obj != NULL, "pre-condition"); HeapWord* addr = (HeapWord*) obj; if (hr == NULL) { - hr = _g1h->heap_region_containing_raw(addr); + hr = _g1h->heap_region_containing(addr); } else { assert(hr->is_in(addr), "pre-condition"); } @@ -380,16 +381,6 @@ // header it's impossible to get back a HC region. assert(!hr->is_continues_humongous(), "sanity"); - // We cannot assert that word_size == obj->size() given that obj - // might not be in a consistent state (another thread might be in - // the process of copying it). So the best thing we can do is to - // assert that word_size is under an upper bound which is its - // containing region's capacity. 
- assert(word_size * HeapWordSize <= hr->capacity(), - "size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT, - word_size * HeapWordSize, hr->capacity(), - HR_FORMAT_PARAMS(hr)); - if (addr < hr->next_top_at_mark_start()) { if (!_nextMarkBitMap->isMarked(addr)) { par_mark_and_count(obj, word_size, hr, worker_id); diff --git a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp --- a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp +++ b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp @@ -500,12 +500,10 @@ } void -G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) { - assert(new_top <= _end, "_end should have already been updated"); - +G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) { // The first BOT entry should have offset 0. reset_bot(); - alloc_block(_bottom, new_top); + alloc_block(_bottom, obj_top); } #ifndef PRODUCT diff --git a/src/share/vm/gc/g1/g1BlockOffsetTable.hpp b/src/share/vm/gc/g1/g1BlockOffsetTable.hpp --- a/src/share/vm/gc/g1/g1BlockOffsetTable.hpp +++ b/src/share/vm/gc/g1/g1BlockOffsetTable.hpp @@ -361,17 +361,18 @@ // implementation, that's true because NULL is represented as 0, and thus // never exceeds the "_next_offset_threshold". void alloc_block(HeapWord* blk_start, HeapWord* blk_end) { - if (blk_end > _next_offset_threshold) + if (blk_end > _next_offset_threshold) { alloc_block_work1(blk_start, blk_end); + } } void alloc_block(HeapWord* blk, size_t size) { - alloc_block(blk, blk+size); + alloc_block(blk, blk+size); } HeapWord* block_start_unsafe(const void* addr); HeapWord* block_start_unsafe_const(const void* addr) const; - void set_for_starts_humongous(HeapWord* new_top); + void set_for_starts_humongous(HeapWord* obj_top); virtual void print_on(outputStream* out) PRODUCT_RETURN; }; diff --git a/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp b/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp --- a/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp +++ b/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp @@ -123,7 +123,6 @@ // to go back by. size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset); q -= (N_words * n_cards_back); - assert(q >= gsp()->bottom(), "Went below bottom!"); index -= n_cards_back; offset = _array->offset_array(index); } diff --git a/src/share/vm/gc/g1/g1CodeBlobClosure.cpp b/src/share/vm/gc/g1/g1CodeBlobClosure.cpp --- a/src/share/vm/gc/g1/g1CodeBlobClosure.cpp +++ b/src/share/vm/gc/g1/g1CodeBlobClosure.cpp @@ -36,7 +36,7 @@ T oop_or_narrowoop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(oop_or_narrowoop)) { oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop); - HeapRegion* hr = _g1h->heap_region_containing_raw(o); + HeapRegion* hr = _g1h->heap_region_containing(o); assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset"); hr->add_strong_code_root(_nm); } diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp @@ -320,12 +320,8 @@ // The header of the new object will be placed at the bottom of // the first region. HeapWord* new_obj = first_hr->bottom(); - // This will be the new end of the first region in the series that - // should also match the end of the last region in the series. 
- HeapWord* new_end = new_obj + word_size_sum; - // This will be the new top of the first region that will reflect - // this allocation. - HeapWord* new_top = new_obj + word_size; + // This will be the new top of the new object. + HeapWord* obj_top = new_obj + word_size; // First, we need to zero the header of the space that we will be // allocating. When we update top further down, some refinement @@ -346,7 +342,7 @@ // will also update the BOT covering all the regions to reflect // that there is a single object that starts at the bottom of the // first region. - first_hr->set_starts_humongous(new_top, new_end); + first_hr->set_starts_humongous(obj_top); first_hr->set_allocation_context(context); // Then, if there are any, we will set up the "continues // humongous" regions. @@ -356,9 +352,6 @@ hr->set_continues_humongous(first_hr); hr->set_allocation_context(context); } - // If we have "continues humongous" regions (hr != NULL), then the - // end of the last one should match new_end. - assert(hr == NULL || hr->end() == new_end, "sanity"); // Up to this point no concurrent thread would have been able to // do any scanning on any region in this series. All the top @@ -371,18 +364,14 @@ // Now that the BOT and the object header have been initialized, // we can update top of the "starts humongous" region. - assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), - "new_top should be in this region"); - first_hr->set_top(new_top); + first_hr->set_top(MIN2(first_hr->end(), obj_top)); if (_hr_printer.is_active()) { - HeapWord* bottom = first_hr->bottom(); - HeapWord* end = first_hr->orig_end(); if ((first + 1) == last) { // the series has a single humongous region - _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top); + _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, obj_top); } else { // the series has more than one humongous regions - _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end); + _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end()); } } @@ -402,27 +391,27 @@ hr = region_at(i); if ((i + 1) == last) { // last continues humongous region - assert(hr->bottom() < new_top && new_top <= hr->end(), + assert(hr->bottom() < obj_top && obj_top <= hr->end(), "new_top should fall on this region"); - hr->set_top(new_top); - _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top); + hr->set_top(obj_top); + _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top); } else { // not last one - assert(new_top > hr->end(), "new_top should be above this region"); + assert(obj_top > hr->end(), "obj_top should be above this region"); hr->set_top(hr->end()); _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end()); } } - // If we have continues humongous regions (hr != NULL), then the - // end of the last one should match new_end and its top should - // match new_top. - assert(hr == NULL || - (hr->end() == new_end && hr->top() == new_top), "sanity"); + // If we have continues humongous regions (hr != NULL), its top should + // match obj_top. 
+ assert(hr == NULL || (hr->top() == obj_top), "sanity"); check_bitmaps("Humongous Region Allocation", first_hr); - assert(first_hr->used() == word_size * HeapWordSize, "invariant"); - increase_used(first_hr->used()); - _humongous_set.add(first_hr); + increase_used(word_size * HeapWordSize); + + for (uint i = first; i < last; ++i) { + _humongous_set.add(region_at(i)); + } return new_obj; } @@ -1139,13 +1128,6 @@ bool doHeapRegion(HeapRegion* r) { HeapRegionRemSet* hrrs = r->rem_set(); - if (r->is_continues_humongous()) { - // We'll assert that the strong code root list and RSet is empty - assert(hrrs->strong_code_roots_list_length() == 0, "sanity"); - assert(hrrs->occupied() == 0, "RSet should be empty"); - return false; - } - _g1h->reset_gc_time_stamps(r); hrrs->clear(); // You might think here that we could clear just the cards @@ -1205,12 +1187,7 @@ if (hr->is_free()) { // We only generate output for non-empty regions. } else if (hr->is_starts_humongous()) { - if (hr->region_num() == 1) { - // single humongous region - _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous); - } else { - _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); - } + _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); } else if (hr->is_continues_humongous()) { _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous); } else if (hr->is_archive()) { @@ -2222,17 +2199,7 @@ } void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { - assert(!hr->is_continues_humongous(), "pre-condition"); hr->reset_gc_time_stamp(); - if (hr->is_starts_humongous()) { - uint first_index = hr->hrm_index() + 1; - uint last_index = hr->last_hc_index(); - for (uint i = first_index; i < last_index; i += 1) { - HeapRegion* chr = region_at(i); - assert(chr->is_continues_humongous(), "sanity"); - chr->reset_gc_time_stamp(); - } - } } #ifndef PRODUCT @@ -2300,9 +2267,7 @@ public: SumUsedClosure() : _used(0) {} bool doHeapRegion(HeapRegion* r) { - if (!r->is_continues_humongous()) { - _used += r->used(); - } + _used += r->used(); return false; } size_t result() { return _used; } @@ -2523,9 +2488,9 @@ bool G1CollectedHeap::is_in(const void* p) const { if (_hrm.reserved().contains(p)) { // Given that we know that p is in the reserved space, - // heap_region_containing_raw() should successfully + // heap_region_containing() should successfully // return the containing region. - HeapRegion* hr = heap_region_containing_raw(p); + HeapRegion* hr = heap_region_containing(p); return hr->is_in(p); } else { return false; @@ -3062,7 +3027,7 @@ r->verify(_vo, &failures); if (failures) { _failures = true; - } else { + } else if (!r->is_starts_humongous()) { VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); r->object_iterate(¬_dead_yet_cl); if (_vo != VerifyOption_G1UseNextMarking) { @@ -5316,24 +5281,9 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr, FreeRegionList* free_list, bool par) { - assert(hr->is_starts_humongous(), "this is only for starts humongous regions"); assert(free_list != NULL, "pre-condition"); - - size_t hr_capacity = hr->capacity(); - // We need to read this before we make the region non-humongous, - // otherwise the information will be gone. 
- uint last_index = hr->last_hc_index(); hr->clear_humongous(); free_region(hr, free_list, par); - - uint i = hr->hrm_index() + 1; - while (i < last_index) { - HeapRegion* curr_hr = region_at(i); - assert(curr_hr->is_continues_humongous(), "invariant"); - curr_hr->clear_humongous(); - free_region(curr_hr, free_list, par); - i += 1; - } } void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, @@ -5497,8 +5447,6 @@ bool failures() { return _failures; } virtual bool doHeapRegion(HeapRegion* hr) { - if (hr->is_continues_humongous()) return false; - bool result = _g1h->verify_bitmaps(_caller, hr); if (!result) { _failures = true; @@ -5772,11 +5720,10 @@ !r->rem_set()->is_empty()) { if (G1TraceEagerReclaimHumongousObjects) { - gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", + gclog_or_tty->print_cr("Live humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", region_idx, (size_t)obj->size() * HeapWordSize, p2i(r->bottom()), - r->region_num(), r->rem_set()->occupied(), r->rem_set()->strong_code_roots_list_length(), next_bitmap->isMarked(r->bottom()), @@ -5793,11 +5740,10 @@ PTR_FORMAT " is not.", p2i(r->bottom())); if (G1TraceEagerReclaimHumongousObjects) { - gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", + gclog_or_tty->print_cr("Dead humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", region_idx, (size_t)obj->size() * HeapWordSize, p2i(r->bottom()), - r->region_num(), r->rem_set()->occupied(), r->rem_set()->strong_code_roots_list_length(), next_bitmap->isMarked(r->bottom()), @@ -5809,10 +5755,16 @@ if (next_bitmap->isMarked(r->bottom())) { next_bitmap->clear(r->bottom()); } - _freed_bytes += r->used(); - r->set_containing_set(NULL); - _humongous_regions_removed.increment(1u, r->capacity()); - g1h->free_humongous_region(r, _free_region_list, false); + do { + _freed_bytes += r->used(); + r->set_containing_set(NULL); + _humongous_regions_removed.increment(1u, r->capacity()); + g1h->free_humongous_region(r, _free_region_list, false); + if (++region_idx >= g1h->num_regions()) { + break; + } + r = g1h->region_at(region_idx); + } while (r->is_continues_humongous()); return false; } @@ -6047,10 +5999,6 @@ } bool doHeapRegion(HeapRegion* r) { - if (r->is_continues_humongous()) { - return false; - } - if (r->is_empty()) { // Add free regions to the free list r->set_free(); @@ -6238,14 +6186,10 @@ _old_count(), _humongous_count(), _free_count(){ } bool doHeapRegion(HeapRegion* hr) { - if (hr->is_continues_humongous()) { - return false; - } - if (hr->is_young()) { // TODO - } else if (hr->is_starts_humongous()) { - assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()); + } else if (hr->is_humongous()) { + assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index()); _humongous_count.increment(1u, hr->capacity()); } else if (hr->is_empty()) { assert(_hrm->is_free(hr), "Heap region %u is empty but 
not on the free list.", hr->hrm_index()); diff --git a/src/share/vm/gc/g1/g1CollectedHeap.hpp b/src/share/vm/gc/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp @@ -1280,11 +1280,6 @@ // Returns the HeapRegion that contains addr. addr must not be NULL. template - inline HeapRegion* heap_region_containing_raw(const T addr) const; - - // Returns the HeapRegion that contains addr. addr must not be NULL. - // If addr is within a humongous continues region, it returns its humongous start region. - template inline HeapRegion* heap_region_containing(const T addr) const; // A CollectedHeap is divided into a dense sequence of "blocks"; that is, diff --git a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp --- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp @@ -77,7 +77,7 @@ } template -inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const { +inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const { assert(addr != NULL, "invariant"); assert(is_in_g1_reserved((const void*) addr), "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")", @@ -85,15 +85,6 @@ return _hrm.addr_to_region((HeapWord*) addr); } -template -inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const { - HeapRegion* hr = heap_region_containing_raw(addr); - if (hr->is_continues_humongous()) { - return hr->humongous_start_region(); - } - return hr; -} - inline void G1CollectedHeap::reset_gc_time_stamp() { _gc_time_stamp = 0; OrderAccess::fence(); @@ -124,9 +115,9 @@ assert_heap_not_locked(); // Assign the containing region to containing_hr so that we don't - // have to keep calling heap_region_containing_raw() in the + // have to keep calling heap_region_containing() in the // asserts below. 
- DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);) + DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);) assert(word_size > 0, "pre-condition"); assert(containing_hr->is_in(start), "it should contain start"); assert(containing_hr->is_young(), "it should be young"); diff --git a/src/share/vm/gc/g1/g1MarkSweep.cpp b/src/share/vm/gc/g1/g1MarkSweep.cpp --- a/src/share/vm/gc/g1/g1MarkSweep.cpp +++ b/src/share/vm/gc/g1/g1MarkSweep.cpp @@ -279,8 +279,8 @@ } else { assert(hr->is_empty(), "Should have been cleared in phase 2."); } - hr->reset_during_compaction(); } + hr->reset_during_compaction(); } else if (!hr->is_pinned()) { hr->compact(); } @@ -334,9 +334,6 @@ HeapWord* end = hr->end(); FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); - assert(hr->is_starts_humongous(), - "Only the start of a humongous region should be freed."); - hr->set_containing_set(NULL); _humongous_regions_removed.increment(1u, hr->capacity()); @@ -373,8 +370,9 @@ bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) { if (hr->is_humongous()) { + oop obj; if (hr->is_starts_humongous()) { - oop obj = oop(hr->bottom()); + obj = oop(hr->bottom()); if (obj->is_gc_marked()) { obj->forward_to(obj); } else { @@ -382,6 +380,10 @@ } } else { assert(hr->is_continues_humongous(), "Invalid humongous."); + obj = oop(hr->humongous_start_region()->bottom()); + if (!obj->is_gc_marked()) { + free_humongous_region(hr); + } } } else if (!hr->is_pinned()) { prepare_for_compaction(hr, hr->end()); diff --git a/src/share/vm/gc/g1/g1OopClosures.inline.hpp b/src/share/vm/gc/g1/g1OopClosures.inline.hpp --- a/src/share/vm/gc/g1/g1OopClosures.inline.hpp +++ b/src/share/vm/gc/g1/g1OopClosures.inline.hpp @@ -222,7 +222,7 @@ template void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) { - if (_g1->heap_region_containing_raw(new_obj)->is_young()) { + if (_g1->heap_region_containing(new_obj)->is_young()) { _scanned_klass->record_modified_oops(); } } diff --git a/src/share/vm/gc/g1/g1ParScanThreadState.cpp b/src/share/vm/gc/g1/g1ParScanThreadState.cpp --- a/src/share/vm/gc/g1/g1ParScanThreadState.cpp +++ b/src/share/vm/gc/g1/g1ParScanThreadState.cpp @@ -216,7 +216,7 @@ oop const old, markOop const old_mark) { const size_t word_sz = old->size(); - HeapRegion* const from_region = _g1h->heap_region_containing_raw(old); + HeapRegion* const from_region = _g1h->heap_region_containing(old); // +1 to make the -1 indexes valid... 
const int young_index = from_region->young_index_in_cset()+1; assert( (from_region->is_young() && young_index > 0) || @@ -294,9 +294,9 @@ if (G1StringDedup::is_enabled()) { const bool is_from_young = state.is_young(); const bool is_to_young = dest_state.is_young(); - assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(), + assert(is_from_young == _g1h->heap_region_containing(old)->is_young(), "sanity"); - assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(), + assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(), "sanity"); G1StringDedup::enqueue_from_evacuation(is_from_young, is_to_young, @@ -314,7 +314,7 @@ oop* old_p = set_partial_array_mask(old); push_on_queue(old_p); } else { - HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr); + HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr); _scanner.set_region(to_region); obj->oop_iterate_backwards(&_scanner); } diff --git a/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp b/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp --- a/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp +++ b/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp @@ -101,7 +101,7 @@ // so that the heap remains parsable in case of evacuation failure. to_obj_array->set_length(end); } - _scanner.set_region(_g1h->heap_region_containing_raw(to_obj)); + _scanner.set_region(_g1h->heap_region_containing(to_obj)); // Process indexes [start,end). It will also process the header // along with the first chunk (i.e., the chunk with start == 0). // Note that at this point the length field of to_obj_array is not @@ -115,10 +115,7 @@ template inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { if (!has_partial_array_mask(ref_to_scan)) { - // Note: we can use "raw" versions of "region_containing" because - // "obj_to_scan" is definitely in the heap, and is not in a - // humongous region. - HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); + HeapRegion* r = _g1h->heap_region_containing(ref_to_scan); do_oop_evac(ref_to_scan, r); } else { do_oop_partial_array((oop*)ref_to_scan); diff --git a/src/share/vm/gc/g1/g1RemSet.inline.hpp b/src/share/vm/gc/g1/g1RemSet.inline.hpp --- a/src/share/vm/gc/g1/g1RemSet.inline.hpp +++ b/src/share/vm/gc/g1/g1RemSet.inline.hpp @@ -60,8 +60,6 @@ assert(_g1->is_in_reserved(obj), "must be in heap"); #endif // ASSERT - assert(from == NULL || from->is_in_reserved(p), "p is not in from"); - HeapRegion* to = _g1->heap_region_containing(obj); if (from != to) { assert(to->rem_set() != NULL, "Need per-region 'into' remsets."); diff --git a/src/share/vm/gc/g1/g1StringDedup.cpp b/src/share/vm/gc/g1/g1StringDedup.cpp --- a/src/share/vm/gc/g1/g1StringDedup.cpp +++ b/src/share/vm/gc/g1/g1StringDedup.cpp @@ -52,7 +52,7 @@ bool G1StringDedup::is_candidate_from_mark(oop obj) { if (java_lang_String::is_instance_inlined(obj)) { - bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young(); + bool from_young = G1CollectedHeap::heap()->heap_region_containing(obj)->is_young(); if (from_young && obj->age() < StringDeduplicationAgeThreshold) { // Candidate found. String is being evacuated from young to old but has not // reached the deduplication age threshold, i.e. 
has not previously been a diff --git a/src/share/vm/gc/g1/heapRegion.cpp b/src/share/vm/gc/g1/heapRegion.cpp --- a/src/share/vm/gc/g1/heapRegion.cpp +++ b/src/share/vm/gc/g1/heapRegion.cpp @@ -67,7 +67,7 @@ // not considered dead, either because it is marked (in the mark bitmap) // or it was allocated after marking finished, then we add it. Otherwise // we can safely ignore the object. - if (!g1h->is_obj_dead(oop(cur), _hr)) { + if (!g1h->is_obj_dead(oop(cur))) { oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr); } else { oop_size = _hr->block_size(cur); @@ -81,7 +81,7 @@ HeapWord* next_obj = cur + oop_size; while (next_obj < top) { // Keep filtering the remembered set. - if (!g1h->is_obj_dead(cur_oop, _hr)) { + if (!g1h->is_obj_dead(cur_oop)) { // Bottom lies entirely below top, so we can call the // non-memRegion version of oop_iterate below. cur_oop->oop_iterate(_rs_scan); @@ -93,7 +93,7 @@ } // Last object. Need to do dead-obj filtering here too. - if (!g1h->is_obj_dead(oop(cur), _hr)) { + if (!g1h->is_obj_dead(oop(cur))) { oop(cur)->oop_iterate(_rs_scan, mr); } } @@ -162,8 +162,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) { assert(_humongous_start_region == NULL, "we should have already filtered out humongous regions"); - assert(_end == orig_end(), - "we should have already filtered out humongous regions"); assert(!in_collection_set(), "Should not clear heap region %u in the collection set", hrm_index()); @@ -213,24 +211,18 @@ _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; } -void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) { +void HeapRegion::set_starts_humongous(HeapWord* obj_top) { assert(!is_humongous(), "sanity / pre-condition"); - assert(end() == orig_end(), - "Should be normal before the humongous object allocation"); assert(top() == bottom(), "should be empty"); - assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); _type.set_starts_humongous(); _humongous_start_region = this; - set_end(new_end); - _offsets.set_for_starts_humongous(new_top); + _offsets.set_for_starts_humongous(obj_top); } void HeapRegion::set_continues_humongous(HeapRegion* first_hr) { assert(!is_humongous(), "sanity / pre-condition"); - assert(end() == orig_end(), - "Should be normal before the humongous object allocation"); assert(top() == bottom(), "should be empty"); assert(first_hr->is_starts_humongous(), "pre-condition"); @@ -241,18 +233,6 @@ void HeapRegion::clear_humongous() { assert(is_humongous(), "pre-condition"); - if (is_starts_humongous()) { - assert(top() <= end(), "pre-condition"); - set_end(orig_end()); - if (top() > end()) { - // at least one "continues humongous" region after it - set_top(end()); - } - } else { - // continues humongous - assert(end() == orig_end(), "sanity"); - } - assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); _humongous_start_region = NULL; } @@ -290,11 +270,6 @@ hr_clear(false /*par*/, false /*clear_space*/); set_top(bottom()); record_timestamp(); - - assert(mr.end() == orig_end(), - "Given region end address " PTR_FORMAT " should match exactly " - "bottom plus one region size, i.e. 
" PTR_FORMAT, - p2i(mr.end()), p2i(orig_end())); } CompactibleSpace* HeapRegion::next_compaction_space() const { @@ -720,7 +695,8 @@ HeapRegion* to = _g1h->heap_region_containing(obj); if (from != NULL && to != NULL && from != to && - !to->is_pinned()) { + !to->is_pinned() && + !from->is_continues_humongous()) { jbyte cv_obj = *_bs->byte_for_const(_containing_obj); jbyte cv_field = *_bs->byte_for_const(p); const jbyte dirty = CardTableModRefBS::dirty_card_val(); @@ -832,7 +808,7 @@ _offsets.verify(); } - if (p != top()) { + if (!is_region_humongous && p != top()) { gclog_or_tty->print_cr("end of last object " PTR_FORMAT " " "does not match top " PTR_FORMAT, p2i(p), p2i(top())); *failures = true; @@ -840,7 +816,6 @@ } HeapWord* the_end = end(); - assert(p == top(), "it should still hold"); // Do some extra BOT consistency checking for addresses in the // range [top, end). BOT look-ups in this range should yield // top. No point in doing that if top == end (there's nothing there). diff --git a/src/share/vm/gc/g1/heapRegion.hpp b/src/share/vm/gc/g1/heapRegion.hpp --- a/src/share/vm/gc/g1/heapRegion.hpp +++ b/src/share/vm/gc/g1/heapRegion.hpp @@ -389,8 +389,6 @@ size_t garbage_bytes() { size_t used_at_mark_start_bytes = (prev_top_at_mark_start() - bottom()) * HeapWordSize; - assert(used_at_mark_start_bytes >= marked_bytes(), - "Can't mark more than we have."); return used_at_mark_start_bytes - marked_bytes(); } @@ -409,7 +407,6 @@ void add_to_marked_bytes(size_t incr_bytes) { _next_marked_bytes = _next_marked_bytes + incr_bytes; - assert(_next_marked_bytes <= used(), "invariant" ); } void zero_marked_bytes() { @@ -445,57 +442,13 @@ return _humongous_start_region; } - // Return the number of distinct regions that are covered by this region: - // 1 if the region is not humongous, >= 1 if the region is humongous. - uint region_num() const { - if (!is_humongous()) { - return 1U; - } else { - assert(is_starts_humongous(), "doesn't make sense on HC regions"); - assert(capacity() % HeapRegion::GrainBytes == 0, "sanity"); - return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes); - } - } - - // Return the index + 1 of the last HC regions that's associated - // with this HS region. - uint last_hc_index() const { - assert(is_starts_humongous(), "don't call this otherwise"); - return hrm_index() + region_num(); - } - - // Same as Space::is_in_reserved, but will use the original size of the region. - // The original size is different only for start humongous regions. They get - // their _end set up to be the end of the last continues region of the - // corresponding humongous object. - bool is_in_reserved_raw(const void* p) const { - return _bottom <= p && p < orig_end(); - } - // Makes the current region be a "starts humongous" region, i.e., // the first region in a series of one or more contiguous regions - // that will contain a single "humongous" object. The two parameters - // are as follows: + // that will contain a single "humongous" object. // - // new_top : The new value of the top field of this region which - // points to the end of the humongous object that's being - // allocated. If there is more than one region in the series, top - // will lie beyond this region's original end field and on the last - // region in the series. - // - // new_end : The new value of the end field of this region which - // points to the end of the last region in the series. If there is - // one region in the series (namely: this one) end will be the same - // as the original end of this region. 
- // - // Updating top and end as described above makes this region look as - // if it spans the entire space taken up by all the regions in the - // series and an single allocation moved its top to new_top. This - // ensures that the space (capacity / allocated) taken up by all - // humongous regions can be calculated by just looking at the - // "starts humongous" regions and by ignoring the "continues - // humongous" regions. - void set_starts_humongous(HeapWord* new_top, HeapWord* new_end); + // obj_top : points to the end of the humongous object that's being + // allocated. + void set_starts_humongous(HeapWord* obj_top); // Makes the current region be a "continues humongous' // region. first_hr is the "start humongous" region of the series @@ -566,9 +519,6 @@ void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; } bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; } - // For the start region of a humongous sequence, it's original end(). - HeapWord* orig_end() const { return _bottom + GrainWords; } - // Reset HR stuff to default values. void hr_clear(bool par, bool clear_space, bool locked = false); void par_clear(); @@ -614,9 +564,6 @@ bool is_marked() { return _prev_top_at_mark_start != bottom(); } void reset_during_compaction() { - assert(is_starts_humongous(), - "should only be called for starts humongous regions"); - zero_marked_bytes(); init_top_at_mark_start(); } diff --git a/src/share/vm/gc/g1/heapRegion.inline.hpp b/src/share/vm/gc/g1/heapRegion.inline.hpp --- a/src/share/vm/gc/g1/heapRegion.inline.hpp +++ b/src/share/vm/gc/g1/heapRegion.inline.hpp @@ -115,6 +115,11 @@ inline bool HeapRegion::block_is_obj(const HeapWord* p) const { G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + if (!this->is_in(p)) { + HeapRegion* hr = g1h->heap_region_containing(p); + return hr->block_is_obj(p); + } if (ClassUnloadingWithConcurrentMark) { return !g1h->is_obj_dead(oop(p), this); } @@ -176,10 +181,6 @@ _prev_top_at_mark_start = _next_top_at_mark_start; _prev_marked_bytes = _next_marked_bytes; _next_marked_bytes = 0; - - assert(_prev_marked_bytes <= - (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) * - HeapWordSize, "invariant"); } inline void HeapRegion::note_start_of_copying(bool during_initial_mark) { diff --git a/src/share/vm/gc/g1/heapRegionManager.cpp b/src/share/vm/gc/g1/heapRegionManager.cpp --- a/src/share/vm/gc/g1/heapRegionManager.cpp +++ b/src/share/vm/gc/g1/heapRegionManager.cpp @@ -343,63 +343,18 @@ continue; } HeapRegion* r = _regions.get_by_index(index); - // We'll ignore "continues humongous" regions (we'll process them - // when we come across their corresponding "start humongous" - // region) and regions already claimed. + // We'll ignore regions already claimed. // However, if the iteration is specified as concurrent, the values for // is_starts_humongous and is_continues_humongous can not be trusted, // and we should just blindly iterate over regions regardless of their // humongous status. - if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) { + if (hrclaimer->is_region_claimed(index)) { continue; } // OK, try to claim it if (!hrclaimer->claim_region(index)) { continue; } - // Success! - // As mentioned above, special treatment of humongous regions can only be - // done if we are iterating non-concurrently. 
- if (!concurrent && r->is_starts_humongous()) { - // If the region is "starts humongous" we'll iterate over its - // "continues humongous" first; in fact we'll do them - // first. The order is important. In one case, calling the - // closure on the "starts humongous" region might de-allocate - // and clear all its "continues humongous" regions and, as a - // result, we might end up processing them twice. So, we'll do - // them first (note: most closures will ignore them anyway) and - // then we'll do the "starts humongous" region. - for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) { - HeapRegion* chr = _regions.get_by_index(ch_index); - - assert(chr->is_continues_humongous(), "Must be humongous region"); - assert(chr->humongous_start_region() == r, - "Must work on humongous continuation of the original start region " - PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)); - assert(!hrclaimer->is_region_claimed(ch_index), - "Must not have been claimed yet because claiming of humongous continuation first claims the start region"); - - // Claim the region so no other worker tries to process the region. When a worker processes a - // starts_humongous region it may also process the associated continues_humongous regions. - // The continues_humongous regions can be changed to free regions. Unless this worker claims - // all of these regions, other workers might try claim and process these newly free regions. - bool claim_result = hrclaimer->claim_region(ch_index); - guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object"); - - bool res2 = blk->doHeapRegion(chr); - if (res2) { - return; - } - - // Right now, this holds (i.e., no closure that actually - // does something with "continues humongous" regions - // clears them). We might have to weaken it in the future, - // but let's leave these two asserts here for extra safety. - assert(chr->is_continues_humongous(), "should still be the case"); - assert(chr->humongous_start_region() == r, "sanity"); - } - } - bool res = blk->doHeapRegion(r); if (res) { return; @@ -508,11 +463,7 @@ // this method may be called, we have only completed allocation of the regions, // but not put into a region set. prev_committed = true; - if (hr->is_starts_humongous()) { - prev_end = hr->orig_end(); - } else { - prev_end = hr->end(); - } + prev_end = hr->end(); } for (uint i = _allocated_heapregions_length; i < max_length(); i++) { guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i); diff --git a/src/share/vm/gc/g1/heapRegionRemSet.cpp b/src/share/vm/gc/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc/g1/heapRegionRemSet.cpp +++ b/src/share/vm/gc/g1/heapRegionRemSet.cpp @@ -105,7 +105,7 @@ // now reused for the corresponding start humongous region, we need to // make sure that we detect this. Thus, we call is_in_reserved_raw() // instead of just is_in_reserved() here. - if (loc_hr->is_in_reserved_raw(from)) { + if (loc_hr->is_in_reserved(from)) { size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom()); CardIdx_t from_card = (CardIdx_t) hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize); @@ -433,7 +433,7 @@ } // Note that this may be a continued H region. - HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); + HeapRegion* from_hr = _g1h->heap_region_containing(from); RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index(); // If the region is already coarsened, return. 
@@ -786,7 +786,7 @@ } bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const { - HeapRegion* hr = _g1h->heap_region_containing_raw(from); + HeapRegion* hr = _g1h->heap_region_containing(from); RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index(); // Is this region in the coarse map? if (_coarse_map.at(hr_ind)) return true; diff --git a/src/share/vm/gc/g1/satbQueue.cpp b/src/share/vm/gc/g1/satbQueue.cpp --- a/src/share/vm/gc/g1/satbQueue.cpp +++ b/src/share/vm/gc/g1/satbQueue.cpp @@ -79,7 +79,7 @@ assert(heap->is_in_reserved(entry), "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)); - HeapRegion* region = heap->heap_region_containing_raw(entry); + HeapRegion* region = heap->heap_region_containing(entry); assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry)); if (entry >= region->next_top_at_mark_start()) { return false; # HG changeset patch # User david # Date 1446647681 -3600 # Wed Nov 04 15:34:41 2015 +0100 # Node ID 2781d87a13e60bf6e7ad9e56a8b78d47c2d135a5 # Parent b8f8bc8ce70b7b19d9b58e730212723f39fd483e imported patch rev1 diff --git a/src/share/vm/gc/g1/concurrentMark.inline.hpp b/src/share/vm/gc/g1/concurrentMark.inline.hpp --- a/src/share/vm/gc/g1/concurrentMark.inline.hpp +++ b/src/share/vm/gc/g1/concurrentMark.inline.hpp @@ -125,15 +125,11 @@ MemRegion mr((HeapWord*)obj, word_size); count_region(mr, hr, marked_bytes_array, task_card_bm); } else { - uint index = hr->hrm_index(); do { MemRegion mr(hr->bottom(), hr->top()); count_region(mr, hr, marked_bytes_array, task_card_bm); - if (++index >= _g1h->num_regions()) { - break; - } - hr = _g1h->region_at(index); - } while (hr->is_continues_humongous()); + hr = _g1h->next_region_by_index(hr); + } while (hr != NULL && hr->is_continues_humongous()); } } diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp @@ -5756,15 +5756,13 @@ next_bitmap->clear(r->bottom()); } do { + HeapRegion* next = g1h->next_region_by_index(r); _freed_bytes += r->used(); r->set_containing_set(NULL); _humongous_regions_removed.increment(1u, r->capacity()); g1h->free_humongous_region(r, _free_region_list, false); - if (++region_idx >= g1h->num_regions()) { - break; - } - r = g1h->region_at(region_idx); - } while (r->is_continues_humongous()); + r = next; + } while (r != NULL && r->is_continues_humongous()); return false; } diff --git a/src/share/vm/gc/g1/g1CollectedHeap.hpp b/src/share/vm/gc/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp @@ -1243,6 +1243,9 @@ // Return the region with the given index. It assumes the index is valid. inline HeapRegion* region_at(uint index) const; + // Return the next region (or null if no more region is available). + inline HeapRegion* next_region_by_index(HeapRegion* hr) const; + // Calculate the region index of the given address. Given address must be // within the heap. inline uint addr_to_region(HeapWord* addr) const; diff --git a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp --- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp @@ -65,6 +65,10 @@ // Return the region with the given index. It assumes the index is valid. 
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
+inline HeapRegion* G1CollectedHeap::next_region_by_index(HeapRegion* hr) const {
+  return _hrm.next_region_by_index(hr);
+}
+
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
diff --git a/src/share/vm/gc/g1/heapRegionManager.hpp b/src/share/vm/gc/g1/heapRegionManager.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.hpp
@@ -150,6 +150,9 @@
   // is valid.
   inline HeapRegion* at(uint index) const;
 
+  // Return the next region (or null if no more region is available).
+  inline HeapRegion* next_region_by_index(HeapRegion* hr) const;
+
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
diff --git a/src/share/vm/gc/g1/heapRegionManager.inline.hpp b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.inline.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
@@ -47,6 +47,18 @@
   return hr;
 }
 
+inline HeapRegion* HeapRegionManager::next_region_by_index(HeapRegion* hr) const {
+  uint index = hr->hrm_index();
+  assert(is_available(index), "pre-condition");
+  index++;
+  if (index < max_length() && is_available(index)) {
+    return at(index);
+  } else {
+    return NULL;
+  }
+}
+
+
 inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
   _free_list.add_ordered(hr);
 }

# HG changeset patch
# User david
# Date 1446654539 -3600
#      Wed Nov 04 17:28:59 2015 +0100
# Node ID 4c58232b7425c01d7b4fe48ce288a7d44d416fdf
# Parent  2781d87a13e60bf6e7ad9e56a8b78d47c2d135a5
imported patch rev2

diff --git a/src/share/vm/gc/g1/concurrentMark.cpp b/src/share/vm/gc/g1/concurrentMark.cpp
--- a/src/share/vm/gc/g1/concurrentMark.cpp
+++ b/src/share/vm/gc/g1/concurrentMark.cpp
@@ -806,6 +806,7 @@
     // This closure can be called concurrently to the mutator, so we must make sure
     // that the result of the getNextMarkedWordAddress() call is compared to the
     // value passed to it as limit to detect any found bits.
+    // end never changes in G1.
     HeapWord* end = r->end();
     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
   }
@@ -1343,6 +1344,8 @@
 
       // Add the size of this object to the number of marked bytes.
       marked_bytes += (size_t)obj_sz * HeapWordSize;
+      // This will happen if we are handling a humongous object that spans
+      // several heap regions.
       if (obj_end > hr->end()) {
         break;
       }
@@ -1432,7 +1435,9 @@
 
     // We're not OK if expected marked bytes > actual marked bytes. It means
     // we have missed accounting some objects during the actual marking.
-    if (exp_marked_bytes > act_marked_bytes) {
+    // For start_humongous regions, the size of the whole object will be
+    // in exp_marked_bytes, so this check does not apply in this case.
+    if (exp_marked_bytes > act_marked_bytes && !hr->is_starts_humongous()) {
       failures += 1;
     }
 
@@ -2440,29 +2445,6 @@
   while (finger < _heap_end) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
-    // Note on how this code handles humongous regions. In the
-    // normal case the finger will reach the start of a "starts
-    // humongous" (SH) region. Its end will either be the end of the
-    // last "continues humongous" (CH) region in the sequence, or the
-    // standard end of the SH region (if the SH is the only region in
-    // the sequence). That way claim_region() will skip over the CH
-    // regions. However, there is a subtle race between a CM thread
-    // executing this method and a mutator thread doing a humongous
-    // object allocation. The two are not mutually exclusive as the CM
-    // thread does not need to hold the Heap_lock when it gets
-    // here. So there is a chance that claim_region() will come across
-    // a free region that's in the progress of becoming a SH or a CH
-    // region. In the former case, it will either
-    //   a) Miss the update to the region's end, in which case it will
-    //      visit every subsequent CH region, will find their bitmaps
-    //      empty, and do nothing, or
-    //   b) Will observe the update of the region's end (in which case
-    //      it will skip the subsequent CH regions).
-    // If it comes across a region that suddenly becomes CH, the
-    // scenario will be similar to b). So, the race between
-    // claim_region() and a humongous object allocation might force us
-    // to do a bit of unnecessary work (due to some unnecessary bitmap
-    // iterations) but it should not introduce and correctness issues.
     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
 
     // Above heap_region_containing may return NULL as we always scan claim
@@ -2541,13 +2523,6 @@
     // Verify the global finger
     HeapWord* global_finger = finger();
     if (global_finger != NULL && global_finger < _heap_end) {
-      // The global finger always points to a heap region boundary. We
-      // use heap_region_containing() to get the containing region
-      // given that the global finger could be pointing to a free region
-      // which subsequently becomes continues humongous. If that
-      // happens, heap_region_containing() will return the bottom of the
-      // corresponding starts humongous region and the check below will
-      // not hold any more.
       // Since we always iterate over all regions, we might get a NULL HeapRegion
       // here.
       HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
diff --git a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp
--- a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp
+++ b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp
@@ -499,16 +499,14 @@
   return _next_offset_threshold;
 }
 
-void
-G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) {
+void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) {
   // The first BOT entry should have offset 0.
   reset_bot();
   alloc_block(_bottom, obj_top);
 }
 
 #ifndef PRODUCT
-void
-G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
+void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
   G1BlockOffsetArray::print_on(out);
   out->print_cr(" next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
   out->print_cr(" next offset index: " SIZE_FORMAT, _next_offset_index);
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp
@@ -1129,6 +1129,14 @@
     HeapRegionRemSet* hrrs = r->rem_set();
     _g1h->reset_gc_time_stamps(r);
+
+    if (r->is_continues_humongous()) {
+      // We'll assert that the strong code root list and RSet is empty
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      assert(hrrs->occupied() == 0, "RSet should be empty");
+      return false;
+    }
+
     hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region. But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
@@ -5279,15 +5287,16 @@
 }
 
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
-                                    FreeRegionList* free_list,
-                                    bool par) {
+                                            FreeRegionList* free_list,
+                                            bool par) {
+  assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
   hr->clear_humongous();
   free_region(hr, free_list, par);
 }
 
 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
-                                   const HeapRegionSetCount& humongous_regions_removed) {
+                                           const HeapRegionSetCount& humongous_regions_removed) {
   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _old_set.bulk_remove(old_regions_removed);
@@ -5720,7 +5729,7 @@
                                !r->rem_set()->is_empty()) {
 
       if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Live humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+        gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
@@ -5740,7 +5749,7 @@
                              PTR_FORMAT " is not.", p2i(r->bottom()));
 
       if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Dead humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+        gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.hpp b/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp
@@ -1178,7 +1178,6 @@
   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
-
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
 #ifdef ASSERT
   // Returns whether p is in one of the available areas of the heap. Slow but
diff --git a/src/share/vm/gc/g1/g1MarkSweep.cpp b/src/share/vm/gc/g1/g1MarkSweep.cpp
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp
@@ -370,20 +370,12 @@
 
 bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
   if (hr->is_humongous()) {
-    oop obj;
-    if (hr->is_starts_humongous()) {
-      obj = oop(hr->bottom());
-      if (obj->is_gc_marked()) {
-        obj->forward_to(obj);
-      } else {
-        free_humongous_region(hr);
-      }
-    } else {
-      assert(hr->is_continues_humongous(), "Invalid humongous.");
-      obj = oop(hr->humongous_start_region()->bottom());
-      if (!obj->is_gc_marked()) {
-        free_humongous_region(hr);
-      }
+    oop obj = oop(hr->humongous_start_region()->bottom());
+    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
+      obj->forward_to(obj);
+    }
+    if (!obj->is_gc_marked()) {
+      free_humongous_region(hr);
     }
   } else if (!hr->is_pinned()) {
     prepare_for_compaction(hr, hr->end());
diff --git a/src/share/vm/gc/g1/g1RemSet.inline.hpp b/src/share/vm/gc/g1/g1RemSet.inline.hpp
--- a/src/share/vm/gc/g1/g1RemSet.inline.hpp
+++ b/src/share/vm/gc/g1/g1RemSet.inline.hpp
@@ -60,6 +60,8 @@
   assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
+  assert(from->is_in_reserved(p) || from->is_starts_humongous(), "p is not in from");
+
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (from != to) {
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
diff --git a/src/share/vm/gc/g1/heapRegion.cpp b/src/share/vm/gc/g1/heapRegion.cpp
--- a/src/share/vm/gc/g1/heapRegion.cpp
+++ b/src/share/vm/gc/g1/heapRegion.cpp
@@ -695,8 +695,7 @@
         HeapRegion* to = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&
             from != to &&
-            !to->is_pinned() &&
-            !from->is_continues_humongous()) {
+            !to->is_pinned()) {
           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
           jbyte cv_field = *_bs->byte_for_const(p);
           const jbyte dirty = CardTableModRefBS::dirty_card_val();
@@ -808,6 +807,13 @@
     _offsets.verify();
   }
 
+  if (is_region_humongous) {
+    oop obj = oop(this->humongous_start_region()->bottom());
+    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
+      gclog_or_tty->print_cr("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
+    }
+  }
+
   if (!is_region_humongous && p != top()) {
     gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                            "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
@@ -906,6 +912,7 @@
 }
 
 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
+  assert(new_end == _bottom + HeapRegion::GrainWords, "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
   Space::set_end(new_end);
   _offsets.resize(new_end - bottom());
 }
diff --git a/src/share/vm/gc/g1/heapRegion.hpp b/src/share/vm/gc/g1/heapRegion.hpp
--- a/src/share/vm/gc/g1/heapRegion.hpp
+++ b/src/share/vm/gc/g1/heapRegion.hpp
@@ -43,6 +43,15 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
+// Each heap region is self contained. top() and end() can never
+// be set beyond the end of the region. For humongous objects,
+// the first region is a StartsHumongous region. If the humongous
+// object is larger than a heap region, the following regions will
+// be of type ContinuesHumongous. In this case the top() and end()
+// of the StartHumongous region will point to the end of that region.
+// The same will be true for all ContinuesHumongous regions except
+// the last, which will have its' top() at the objects' top.
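The verify() hunk above adds a containment check for humongous regions: a region only passes if its bottom() lies within the extent of the object that begins in its humongous start region. The following standalone sketch (ordinary C++ over toy word-indexed addresses, not HotSpot types; the region and object sizes are invented example values) models that condition.

// Illustrative sketch only, not HotSpot code: models the humongous containment
// check performed by the HeapRegion::verify() change above.
#include <cstddef>
#include <cstdio>

struct ToyRegion {
  size_t bottom;          // first word index covered by this region
};

struct ToyHumongousObject {
  size_t start;           // word index of the object's first word
  size_t size_words;      // object size in words
};

// Mirrors the verify() condition: the region is bogus if the start object
// begins after the region's bottom, or ends before the region's bottom.
bool region_is_part_of_object(const ToyRegion& r, const ToyHumongousObject& obj) {
  bool fails = obj.start > r.bottom || obj.start + obj.size_words < r.bottom;
  return !fails;
}

int main() {
  const size_t region_words = 1024;       // assumed region size, for illustration only
  ToyHumongousObject obj = { 0, 2500 };   // spans three 1024-word regions
  ToyRegion r0 = { 0 };
  ToyRegion r2 = { 2 * region_words };
  ToyRegion r4 = { 4 * region_words };
  std::printf("r0: %d  r2: %d  r4: %d\n",
              region_is_part_of_object(r0, obj),   // 1: object starts here
              region_is_part_of_object(r2, obj),   // 1: object still covers this bottom
              region_is_part_of_object(r4, obj));  // 0: past the end of the object
  return 0;
}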
+
 class G1CollectedHeap;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
@@ -564,6 +573,9 @@
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
   void reset_during_compaction() {
+    assert(is_humongous(),
+           "should only be called for humongous regions");
+
     zero_marked_bytes();
     init_top_at_mark_start();
   }
diff --git a/src/share/vm/gc/g1/heapRegion.inline.hpp b/src/share/vm/gc/g1/heapRegion.inline.hpp
--- a/src/share/vm/gc/g1/heapRegion.inline.hpp
+++ b/src/share/vm/gc/g1/heapRegion.inline.hpp
@@ -118,6 +118,12 @@
 
   if (!this->is_in(p)) {
     HeapRegion* hr = g1h->heap_region_containing(p);
+#ifdef ASSERT
+    assert(hr->is_humongous(), "This case can only happen for humongous regions");
+    oop obj = oop(hr->humongous_start_region()->bottom());
+    assert((HeapWord*)obj <= p, "p must be in humongous object");
+    assert(p <= (HeapWord*)obj + obj->size(), "p must be in humongous object");
+#endif
     return hr->block_is_obj(p);
   }
   if (ClassUnloadingWithConcurrentMark) {

# HG changeset patch
# User david
# Date 1446716427 -3600
#      Thu Nov 05 10:40:27 2015 +0100
# Node ID a26150c4384c686392009adc86f0a324c7d16bc7
# Parent  4c58232b7425c01d7b4fe48ce288a7d44d416fdf
imported patch rev3

diff --git a/src/share/vm/gc/g1/concurrentMark.inline.hpp b/src/share/vm/gc/g1/concurrentMark.inline.hpp
--- a/src/share/vm/gc/g1/concurrentMark.inline.hpp
+++ b/src/share/vm/gc/g1/concurrentMark.inline.hpp
@@ -128,8 +128,8 @@
     do {
       MemRegion mr(hr->bottom(), hr->top());
      count_region(mr, hr, marked_bytes_array, task_card_bm);
-      hr = _g1h->next_region_by_index(hr);
-    } while (hr != NULL && hr->is_continues_humongous());
+      hr = _g1h->next_humongous_region(hr);
+    } while (hr != NULL);
   }
 }
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp
@@ -366,13 +366,7 @@
   // we can update top of the "starts humongous" region.
   first_hr->set_top(MIN2(first_hr->end(), obj_top));
   if (_hr_printer.is_active()) {
-    if ((first + 1) == last) {
-      // the series has a single humongous region
-      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, obj_top);
-    } else {
-      // the series has more than one humongous regions
-      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
-    }
+    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
   }
 
   // Now, we will update the top fields of the "continues humongous"
@@ -5765,13 +5759,13 @@
       next_bitmap->clear(r->bottom());
     }
     do {
-      HeapRegion* next = g1h->next_region_by_index(r);
+      HeapRegion* next = g1h->next_humongous_region(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
       _humongous_regions_removed.increment(1u, r->capacity());
       g1h->free_humongous_region(r, _free_region_list, false);
       r = next;
-    } while (r != NULL && r->is_continues_humongous());
+    } while (r != NULL);
 
     return false;
   }
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.hpp b/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp
@@ -1242,8 +1242,8 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
-  // Return the next region (or null if no more region is available).
-  inline HeapRegion* next_region_by_index(HeapRegion* hr) const;
+  // Return the next region (by index) if that region is also humongous, NULL otherwise.
+  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
 
   // Calculate the region index of the given address. Given address must be
   // within the heap.
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
@@ -65,8 +65,8 @@
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
-inline HeapRegion* G1CollectedHeap::next_region_by_index(HeapRegion* hr) const {
-  return _hrm.next_region_by_index(hr);
+inline HeapRegion* G1CollectedHeap::next_humongous_region(HeapRegion* hr) const {
+  return _hrm.next_humongous_region(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
diff --git a/src/share/vm/gc/g1/g1HRPrinter.cpp b/src/share/vm/gc/g1/g1HRPrinter.cpp
--- a/src/share/vm/gc/g1/g1HRPrinter.cpp
+++ b/src/share/vm/gc/g1/g1HRPrinter.cpp
@@ -51,7 +51,6 @@
     case Eden: return "Eden";
     case Survivor: return "Survivor";
     case Old: return "Old";
-    case SingleHumongous: return "SingleH";
     case StartsHumongous: return "StartsH";
     case ContinuesHumongous: return "ContinuesH";
     case Archive: return "Archive";
diff --git a/src/share/vm/gc/g1/g1HRPrinter.hpp b/src/share/vm/gc/g1/g1HRPrinter.hpp
--- a/src/share/vm/gc/g1/g1HRPrinter.hpp
+++ b/src/share/vm/gc/g1/g1HRPrinter.hpp
@@ -50,7 +50,6 @@
     Eden,
     Survivor,
     Old,
-    SingleHumongous,
     StartsHumongous,
     ContinuesHumongous,
     Archive
diff --git a/src/share/vm/gc/g1/heapRegionManager.hpp b/src/share/vm/gc/g1/heapRegionManager.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.hpp
@@ -150,8 +150,8 @@
   // is valid.
   inline HeapRegion* at(uint index) const;
 
-  // Return the next region (or null if no more region is available).
-  inline HeapRegion* next_region_by_index(HeapRegion* hr) const;
+  // Return the next region (by index) if that region is also humongous, NULL otherwise.
+  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
 
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
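rev3 replaces the general next_region_by_index() with next_humongous_region(), implemented in the next hunk: it returns the following region only when that region continues the same humongous object, so the callers above can drop the is_continues_humongous() test from their loop conditions. A minimal standalone model of that contract (a toy region table instead of HeapRegionManager; every identifier except next_humongous_region is invented for the example) might look like this:

// Illustrative sketch only, not HotSpot code.
#include <cstdio>

enum class ToyType { Free, Old, StartsHumongous, ContinuesHumongous };

struct ToyRegion {
  unsigned index;
  ToyType type;
};

const unsigned kMaxRegions = 8;
ToyRegion g_regions[kMaxRegions] = {
  {0, ToyType::Old},
  {1, ToyType::StartsHumongous},     // a humongous object covering regions 1..3
  {2, ToyType::ContinuesHumongous},
  {3, ToyType::ContinuesHumongous},
  {4, ToyType::Free},
  {5, ToyType::StartsHumongous},     // a single-region humongous object
  {6, ToyType::Old},
  {7, ToyType::Free},
};

// Given a region of a humongous object, return the next region by index only
// if it continues that object; otherwise return nullptr (end of the object).
// The real implementation additionally asserts that hr is humongous and that
// its index is available.
ToyRegion* next_humongous_region(ToyRegion* hr) {
  unsigned index = hr->index + 1;
  if (index < kMaxRegions && g_regions[index].type == ToyType::ContinuesHumongous) {
    return &g_regions[index];
  }
  return nullptr;
}

int main() {
  // Visits regions 1, 2, 3 and then stops, like the do/while loops in rev3.
  for (ToyRegion* r = &g_regions[1]; r != nullptr; r = next_humongous_region(r)) {
    std::printf("region %u belongs to the humongous object\n", r->index);
  }
  return 0;
}

Starting the same walk at region 5 visits only region 5, which is why the counting and eager-reclaim loops above no longer need a separate single-region case.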
diff --git a/src/share/vm/gc/g1/heapRegionManager.inline.hpp b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.inline.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
@@ -47,18 +47,18 @@
   return hr;
 }
 
-inline HeapRegion* HeapRegionManager::next_region_by_index(HeapRegion* hr) const {
+inline HeapRegion* HeapRegionManager::next_humongous_region(HeapRegion* hr) const {
   uint index = hr->hrm_index();
   assert(is_available(index), "pre-condition");
+  assert(hr->is_humongous(), "next_humongous_region should only be called for a humongous region.");
   index++;
-  if (index < max_length() && is_available(index)) {
+  if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
     return at(index);
   } else {
     return NULL;
   }
 }
 
-
 inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
   _free_list.add_ordered(hr);
 }

# HG changeset patch
# User david
# Date 1446727479 -3600
#      Thu Nov 05 13:44:39 2015 +0100
# Node ID 329d2454f84b729b8ba6761690a8fc9f0a5d6707
# Parent  a26150c4384c686392009adc86f0a324c7d16bc7
[mq]: rev4

diff --git a/src/share/vm/gc/g1/concurrentMark.cpp b/src/share/vm/gc/g1/concurrentMark.cpp
--- a/src/share/vm/gc/g1/concurrentMark.cpp
+++ b/src/share/vm/gc/g1/concurrentMark.cpp
@@ -1433,12 +1433,25 @@
     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
     size_t act_marked_bytes = hr->next_marked_bytes();
 
-    // We're not OK if expected marked bytes > actual marked bytes. It means
-    // we have missed accounting some objects during the actual marking.
-    // For start_humongous regions, the size of the whole object will be
-    // in exp_marked_bytes, so this check does not apply in this case.
-    if (exp_marked_bytes > act_marked_bytes && !hr->is_starts_humongous()) {
-      failures += 1;
+    if (exp_marked_bytes > act_marked_bytes) {
+      if (hr->is_starts_humongous()) {
+        // For start_humongous regions, the size of the whole object will be
+        // in exp_marked_bytes.
+        HeapRegion* region = hr;
+        int num_regions;
+        for (num_regions = 0; region != NULL; num_regions++) {
+          region = _g1h->next_region_in_humongous(region);
+        }
+        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
+          failures += 1;
+        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
+          failures += 1;
+        }
+      } else {
+        // We're not OK if expected marked bytes > actual marked bytes. It means
+        // we have missed accounting some objects during the actual marking.
+        failures += 1;
+      }
     }
 
     // Verify the bit, for this region, in the actual and expected
diff --git a/src/share/vm/gc/g1/concurrentMark.inline.hpp b/src/share/vm/gc/g1/concurrentMark.inline.hpp
--- a/src/share/vm/gc/g1/concurrentMark.inline.hpp
+++ b/src/share/vm/gc/g1/concurrentMark.inline.hpp
@@ -128,7 +128,7 @@
     do {
       MemRegion mr(hr->bottom(), hr->top());
       count_region(mr, hr, marked_bytes_array, task_card_bm);
-      hr = _g1h->next_humongous_region(hr);
+      hr = _g1h->next_region_in_humongous(hr);
     } while (hr != NULL);
   }
 }
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp
@@ -366,7 +366,7 @@
   // we can update top of the "starts humongous" region.
   first_hr->set_top(MIN2(first_hr->end(), obj_top));
   if (_hr_printer.is_active()) {
-    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
+    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
   }
 
   // Now, we will update the top fields of the "continues humongous"
@@ -1128,10 +1128,9 @@
       // We'll assert that the strong code root list and RSet is empty
       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
       assert(hrrs->occupied() == 0, "RSet should be empty");
-      return false;
+    } else {
+      hrrs->clear();
     }
-
-    hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region. But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
@@ -5759,7 +5758,7 @@
       next_bitmap->clear(r->bottom());
     }
     do {
-      HeapRegion* next = g1h->next_humongous_region(r);
+      HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
       _humongous_regions_removed.increment(1u, r->capacity());
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.hpp b/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp
@@ -1242,8 +1242,9 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
-  // Return the next region (by index) if that region is also humongous, NULL otherwise.
-  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
 
   // Calculate the region index of the given address. Given address must be
   // within the heap.
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
@@ -65,8 +65,8 @@
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
-inline HeapRegion* G1CollectedHeap::next_humongous_region(HeapRegion* hr) const {
-  return _hrm.next_humongous_region(hr);
+inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
+  return _hrm.next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
diff --git a/src/share/vm/gc/g1/heapRegion.hpp b/src/share/vm/gc/g1/heapRegion.hpp
--- a/src/share/vm/gc/g1/heapRegion.hpp
+++ b/src/share/vm/gc/g1/heapRegion.hpp
@@ -47,10 +47,10 @@
 // be set beyond the end of the region. For humongous objects,
 // the first region is a StartsHumongous region. If the humongous
 // object is larger than a heap region, the following regions will
-// be of type ContinuesHumongous. In this case the top() and end()
-// of the StartHumongous region will point to the end of that region.
-// The same will be true for all ContinuesHumongous regions except
-// the last, which will have its' top() at the objects' top.
+// be of type ContinuesHumongous. In this case the top() of the
+// StartHumongous region and all ContinuesHumongous regions except
+// the last will point to their own end. For the last ContinuesHumongous
+// region, top() will equal the object's top.
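The reworked comment above states the invariant precisely: every region of a humongous object except the last has top() == end(), and only the last region's top() marks where the object actually ends. A short worked example (plain C++; the 1 MiB region size and 2.5 MiB object are assumed values, not G1's real constants) makes the layout concrete:

// Illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_bytes = 1u << 20;        // assume 1 MiB regions
  const size_t object_bytes = (5u << 20) / 2;  // a hypothetical 2.5 MiB humongous object

  // Number of regions the object needs, rounded up: 3 in this example.
  size_t num_regions = (object_bytes + region_bytes - 1) / region_bytes;

  for (size_t i = 0; i < num_regions; i++) {
    size_t bottom = i * region_bytes;
    size_t end = bottom + region_bytes;
    // All regions except the last are completely covered, so top() == end();
    // the last region's top() sits at the object's top.
    size_t top = (i + 1 < num_regions) ? end : object_bytes;
    const char* type = (i == 0) ? "StartsHumongous" : "ContinuesHumongous";
    std::printf("region %zu (%s): bottom=%zu end=%zu top=%zu\n", i, type, bottom, end, top);
  }
  return 0;
}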
 class G1CollectedHeap;
 class HeapRegionRemSet;
diff --git a/src/share/vm/gc/g1/heapRegion.inline.hpp b/src/share/vm/gc/g1/heapRegion.inline.hpp
--- a/src/share/vm/gc/g1/heapRegion.inline.hpp
+++ b/src/share/vm/gc/g1/heapRegion.inline.hpp
@@ -117,14 +117,8 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   if (!this->is_in(p)) {
-    HeapRegion* hr = g1h->heap_region_containing(p);
-#ifdef ASSERT
-    assert(hr->is_humongous(), "This case can only happen for humongous regions");
-    oop obj = oop(hr->humongous_start_region()->bottom());
-    assert((HeapWord*)obj <= p, "p must be in humongous object");
-    assert(p <= (HeapWord*)obj + obj->size(), "p must be in humongous object");
-#endif
-    return hr->block_is_obj(p);
+    assert(is_continues_humongous(), "This case can only happen for humongous regions");
+    return (p == humongous_start_region()->bottom());
   }
   if (ClassUnloadingWithConcurrentMark) {
     return !g1h->is_obj_dead(oop(p), this);
diff --git a/src/share/vm/gc/g1/heapRegionManager.hpp b/src/share/vm/gc/g1/heapRegionManager.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.hpp
@@ -150,8 +150,9 @@
   // is valid.
   inline HeapRegion* at(uint index) const;
 
-  // Return the next region (by index) if that region is also humongous, NULL otherwise.
-  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
 
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
diff --git a/src/share/vm/gc/g1/heapRegionManager.inline.hpp b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
--- a/src/share/vm/gc/g1/heapRegionManager.inline.hpp
+++ b/src/share/vm/gc/g1/heapRegionManager.inline.hpp
@@ -47,10 +47,10 @@
   return hr;
 }
 
-inline HeapRegion* HeapRegionManager::next_humongous_region(HeapRegion* hr) const {
+inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
   uint index = hr->hrm_index();
   assert(is_available(index), "pre-condition");
-  assert(hr->is_humongous(), "next_humongous_region should only be called for a humongous region.");
+  assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
   index++;
   if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
     return at(index);
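For the starts-humongous case, the rev4 change to the marked-bytes verification earlier in this patch accepts the expected count only if it is consistent with the number of regions the object occupies: strictly more than (num_regions - 1) regions' worth of bytes and at most num_regions regions' worth. A standalone sketch of that bound (the region size is an example value, not HeapRegion::GrainBytes):

// Illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdio>

// exp_marked_bytes carries the whole object's size for a starts-humongous
// region, so it must land between (num_regions - 1) and num_regions regions'
// worth of bytes.
bool starts_humongous_marked_bytes_ok(size_t exp_marked_bytes,
                                      size_t num_regions,
                                      size_t grain_bytes) {
  if ((num_regions - 1) * grain_bytes >= exp_marked_bytes) {
    return false;  // too small to need this many regions
  }
  if (num_regions * grain_bytes < exp_marked_bytes) {
    return false;  // too large to fit in this many regions
  }
  return true;
}

int main() {
  const size_t grain_bytes = 1u << 20;         // assumed 1 MiB regions
  const size_t object_bytes = (5u << 20) / 2;  // a hypothetical 2.5 MiB object
  // Consistent with 3 regions, inconsistent with 2 or 4.
  std::printf("%d %d %d\n",
              starts_humongous_marked_bytes_ok(object_bytes, 3, grain_bytes),
              starts_humongous_marked_bytes_ok(object_bytes, 2, grain_bytes),
              starts_humongous_marked_bytes_ok(object_bytes, 4, grain_bytes));
  return 0;
}

Here num_regions is what the rev4 loop over next_region_in_humongous() counts for the region under verification.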