--- old/src/share/vm/gc/g1/concurrentMark.cpp	2015-11-05 13:45:01.206427904 +0100
+++ new/src/share/vm/gc/g1/concurrentMark.cpp	2015-11-05 13:45:01.110427428 +0100
@@ -1433,12 +1433,25 @@
     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
     size_t act_marked_bytes = hr->next_marked_bytes();
 
-    // We're not OK if expected marked bytes > actual marked bytes. It means
-    // we have missed accounting some objects during the actual marking.
-    // For start_humongous regions, the size of the whole object will be
-    // in exp_marked_bytes, so this check does not apply in this case.
-    if (exp_marked_bytes > act_marked_bytes && !hr->is_starts_humongous()) {
-      failures += 1;
+    if (exp_marked_bytes > act_marked_bytes) {
+      if (hr->is_starts_humongous()) {
+        // For start_humongous regions, the size of the whole object will be
+        // in exp_marked_bytes.
+        HeapRegion* region = hr;
+        int num_regions;
+        for (num_regions = 0; region != NULL; num_regions++) {
+          region = _g1h->next_region_in_humongous(region);
+        }
+        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
+          failures += 1;
+        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
+          failures += 1;
+        }
+      } else {
+        // We're not OK if expected marked bytes > actual marked bytes. It means
+        // we have missed accounting for some objects during the actual marking.
+        failures += 1;
+      }
     }
 
     // Verify the bit, for this region, in the actual and expected
--- old/src/share/vm/gc/g1/concurrentMark.inline.hpp	2015-11-05 13:45:01.410428916 +0100
+++ new/src/share/vm/gc/g1/concurrentMark.inline.hpp	2015-11-05 13:45:01.314428439 +0100
@@ -128,7 +128,7 @@
     do {
       MemRegion mr(hr->bottom(), hr->top());
       count_region(mr, hr, marked_bytes_array, task_card_bm);
-      hr = _g1h->next_humongous_region(hr);
+      hr = _g1h->next_region_in_humongous(hr);
     } while (hr != NULL);
   }
 }
--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-11-05 13:45:01.586429788 +0100
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-11-05 13:45:01.490429312 +0100
@@ -366,7 +366,7 @@
   // we can update top of the "starts humongous" region.
   first_hr->set_top(MIN2(first_hr->end(), obj_top));
   if (_hr_printer.is_active()) {
-    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
+    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
   }
 
   // Now, we will update the top fields of the "continues humongous"
@@ -1128,10 +1128,9 @@
     // We'll assert that the strong code root list and RSet is empty
     assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
     assert(hrrs->occupied() == 0, "RSet should be empty");
-    return false;
+  } else {
+    hrrs->clear();
   }
-
-  hrrs->clear();
   // You might think here that we could clear just the cards
   // corresponding to the used region. But no: if we leave a dirty card
   // in a region we might allocate into, then it would prevent that card
@@ -5759,7 +5758,7 @@
       next_bitmap->clear(r->bottom());
     }
     do {
-      HeapRegion* next = g1h->next_humongous_region(r);
+      HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
      r->set_containing_set(NULL);
       _humongous_regions_removed.increment(1u, r->capacity());
--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp	2015-11-05 13:45:01.826430978 +0100
+++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp	2015-11-05 13:45:01.734430522 +0100
@@ -1242,8 +1242,9 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
-  // Return the next region (by index) if that region is also humongous, NULL otherwise.
-  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
 
   // Calculate the region index of the given address. Given address must be
   // within the heap.
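Note on the concurrentMark.cpp verification hunk above: it accepts exp_marked_bytes for a starts-humongous region only when the value lies in the single interval consistent with one object covering num_regions regions; all regions but the last are fully covered, so the value must exceed (num_regions - 1) * GrainBytes and be at most num_regions * GrainBytes. A minimal standalone sketch of that bounds check, with grain_bytes as a hypothetical stand-in for HeapRegion::GrainBytes (illustration only, not HotSpot code):

#include <cassert>
#include <cstddef>

// Returns true iff exp_marked_bytes is consistent with a single humongous
// object spanning exactly num_regions regions of grain_bytes bytes each.
// Mirrors the two failure checks in the verification hunk above.
bool humongous_marked_bytes_plausible(size_t exp_marked_bytes,
                                      int num_regions,
                                      size_t grain_bytes) {
  assert(num_regions >= 1 && "a humongous object spans at least one region");
  size_t covered_by_full_regions = (size_t)(num_regions - 1) * grain_bytes;
  size_t max_possible            = (size_t)num_regions * grain_bytes;
  // Fails exactly when either check in the patch would bump `failures`.
  return exp_marked_bytes > covered_by_full_regions &&
         exp_marked_bytes <= max_possible;
}

For example, with 1 MB regions an object spanning 3 regions must report more than 2 MB and at most 3 MB of expected marked bytes.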
--- old/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	2015-11-05 13:45:02.002431851 +0100
+++ new/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	2015-11-05 13:45:01.918431435 +0100
@@ -65,8 +65,8 @@
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
-inline HeapRegion* G1CollectedHeap::next_humongous_region(HeapRegion* hr) const {
-  return _hrm.next_humongous_region(hr);
+inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
+  return _hrm.next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
--- old/src/share/vm/gc/g1/heapRegion.hpp	2015-11-05 13:45:02.158432625 +0100
+++ new/src/share/vm/gc/g1/heapRegion.hpp	2015-11-05 13:45:02.078432228 +0100
@@ -47,10 +47,10 @@
 // be set beyond the end of the region. For humongous objects,
 // the first region is a StartsHumongous region. If the humongous
 // object is larger than a heap region, the following regions will
-// be of type ContinuesHumongous. In this case the top() and end()
-// of the StartHumongous region will point to the end of that region.
-// The same will be true for all ContinuesHumongous regions except
-// the last, which will have its' top() at the objects' top.
+// be of type ContinuesHumongous. In this case the top() of the
+// StartsHumongous region and all ContinuesHumongous regions except
+// the last will point to their own end. For the last ContinuesHumongous
+// region, top() will equal the object's top.
 
 class G1CollectedHeap;
 class HeapRegionRemSet;
--- old/src/share/vm/gc/g1/heapRegion.inline.hpp	2015-11-05 13:45:02.326433458 +0100
+++ new/src/share/vm/gc/g1/heapRegion.inline.hpp	2015-11-05 13:45:02.242433041 +0100
@@ -117,14 +117,8 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   if (!this->is_in(p)) {
-    HeapRegion* hr = g1h->heap_region_containing(p);
-#ifdef ASSERT
-    assert(hr->is_humongous(), "This case can only happen for humongous regions");
-    oop obj = oop(hr->humongous_start_region()->bottom());
-    assert((HeapWord*)obj <= p, "p must be in humongous object");
-    assert(p <= (HeapWord*)obj + obj->size(), "p must be in humongous object");
-#endif
-    return hr->block_is_obj(p);
+    assert(is_continues_humongous(), "This case can only happen for humongous regions");
+    return (p == humongous_start_region()->bottom());
   }
   if (ClassUnloadingWithConcurrentMark) {
     return !g1h->is_obj_dead(oop(p), this);
--- old/src/share/vm/gc/g1/heapRegionManager.hpp	2015-11-05 13:45:02.494434291 +0100
+++ new/src/share/vm/gc/g1/heapRegionManager.hpp	2015-11-05 13:45:02.402433835 +0100
@@ -150,8 +150,9 @@
   // is valid.
   inline HeapRegion* at(uint index) const;
 
-  // Return the next region (by index) if that region is also humongous, NULL otherwise.
-  inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
 
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
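Note on the heapRegion.hpp hunk above: the rewritten comment pins down the new top() invariant, which is exactly the MIN2(first_hr->end(), obj_top) computation in the g1CollectedHeap.cpp hunk earlier: every region covering a humongous object except the last has top() == end(), and the last has top() at the object's top. A hedged sketch of that rule using word offsets into a flat heap (hypothetical names, not HotSpot code):

#include <algorithm>
#include <cstddef>

// Expected top() for a covering region starting at region_bottom (all
// values are word offsets). region_words stands in for
// HeapRegion::GrainWords; obj_top is one past the object's last word.
// A fully covered region yields its own end; the last yields obj_top.
size_t expected_region_top(size_t region_bottom,
                           size_t region_words,
                           size_t obj_top) {
  return std::min(region_bottom + region_words, obj_top);  // MIN2(end(), obj_top)
}

With 512-word regions, an object of 1100 words starting at offset 0 gives tops of 512, 1024, and 1100 for its three covering regions.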
--- old/src/share/vm/gc/g1/heapRegionManager.inline.hpp	2015-11-05 13:45:02.674435183 +0100
+++ new/src/share/vm/gc/g1/heapRegionManager.inline.hpp	2015-11-05 13:45:02.578434707 +0100
@@ -47,10 +47,10 @@
   return hr;
 }
 
-inline HeapRegion* HeapRegionManager::next_humongous_region(HeapRegion* hr) const {
+inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
   uint index = hr->hrm_index();
   assert(is_available(index), "pre-condition");
-  assert(hr->is_humongous(), "next_humongous_region should only be called for a humongous region.");
+  assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
   index++;
   if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
     return at(index);
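Note on the hunk above: next_region_in_humongous() is just an index probe, returning the region at the next index only if it continues the same humongous object, and NULL otherwise. A self-contained mini-model over a plain array, with a hypothetical Region struct standing in for the HeapRegion/HeapRegionManager types (sketch only; the availability check is omitted):

#include <cstddef>

// Hypothetical stand-in for HeapRegion: its index in the manager's
// array and whether it is tagged ContinuesHumongous.
struct Region {
  unsigned index;
  bool continues_humongous;
};

// Return the next region (by index) that is part of the same humongous
// object, or nullptr when hr is the last region covering it. Mirrors
// the bounds-and-tag test in the HeapRegionManager hunk above.
Region* next_region_in_humongous(Region* regions, unsigned num_regions, Region* hr) {
  unsigned next = hr->index + 1;
  if (next < num_regions && regions[next].continues_humongous) {
    return &regions[next];
  }
  return nullptr;
}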