src/share/vm/gc/g1/g1ConcurrentMark.cpp

*** 58,102 ****
  #include "runtime/prefetch.inline.hpp"
  #include "services/memTracker.hpp"
  #include "utilities/align.hpp"
  #include "utilities/growableArray.hpp"
  
! // Concurrent marking bit map wrapper
! 
! G1CMBitMapRO::G1CMBitMapRO(int shifter) :
!   _bm(),
!   _shifter(shifter) {
!   _bmStartWord = 0;
!   _bmWordSize = 0;
! }
! 
! HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
!                                                  const HeapWord* limit) const {
!   // First we must round addr *up* to a possible object boundary.
!   addr = align_up(addr, HeapWordSize << _shifter);
!   size_t addrOffset = heapWordToOffset(addr);
!   assert(limit != NULL, "limit must not be NULL");
!   size_t limitOffset = heapWordToOffset(limit);
!   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
!   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
!   assert(nextAddr >= addr, "get_next_one postcondition");
!   assert(nextAddr == limit || isMarked(nextAddr),
!          "get_next_one postcondition");
!   return nextAddr;
! }
! 
! #ifndef PRODUCT
! bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
!   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
!   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
!          "size inconsistency");
!   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
!          _bmWordSize == heap_rs.word_size();
! }
! #endif
! 
! void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
    _bm.print_on_error(st, prefix);
  }
  
  size_t G1CMBitMap::compute_size(size_t heap_size) {
    return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
--- 58,68 ----
  #include "runtime/prefetch.inline.hpp"
  #include "services/memTracker.hpp"
  #include "utilities/align.hpp"
  #include "utilities/growableArray.hpp"
  
! void G1CMBitMap::print_on_error(outputStream* st, const char* prefix) const {
    _bm.print_on_error(st, prefix);
  }
  
  size_t G1CMBitMap::compute_size(size_t heap_size) {
    return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
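
[Note, not part of the webrev] The removed G1CMBitMapRO wrapper encoded the bitmap geometry: one bit covers 1 << _shifter heap words, with address/offset conversions layered on a raw BitMap. Below is a minimal standalone C++ model of that mapping and the next-marked-address query; all names are illustrative stand-ins, not HotSpot's:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef const uintptr_t* HeapWordPtr; // stand-in for HotSpot's HeapWord*

    // One bit covers (1 << shifter) heap words; the conversions mirror
    // heapWordToOffset()/offsetToHeapWord() in the removed wrapper.
    struct MarkBitMapModel {
      HeapWordPtr start;           // first covered heap word
      int shifter;                 // log2(heap words per bit)
      std::vector<bool> bits;

      MarkBitMapModel(HeapWordPtr s, size_t words, int sh)
        : start(s), shifter(sh), bits(words >> sh, false) {}

      size_t addr_to_offset(HeapWordPtr addr) const {
        return (size_t)(addr - start) >> shifter;
      }
      HeapWordPtr offset_to_addr(size_t offset) const {
        return start + (offset << shifter);
      }

      // Semantics the patch keeps under the new name get_next_marked_addr():
      // first marked address in [addr, limit), or limit if none is set.
      HeapWordPtr get_next_marked_addr(HeapWordPtr addr, HeapWordPtr limit) const {
        for (size_t i = addr_to_offset(addr); i < addr_to_offset(limit); i++) {
          if (bits[i]) {
            return offset_to_addr(i);
          }
        }
        return limit;
      }
    };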
*** 105,118 ****
  size_t G1CMBitMap::mark_distance() {
    return MinObjAlignmentInBytes * BitsPerByte;
  }
  
  void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
!   _bmStartWord = heap.start();
!   _bmWordSize = heap.word_size();
!   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
    storage->set_mapping_changed_listener(&_listener);
  }
  
  void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
--- 71,83 ----
  size_t G1CMBitMap::mark_distance() {
    return MinObjAlignmentInBytes * BitsPerByte;
  }
  
  void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
!   _covered = heap;
!   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _covered.word_size() >> _shifter);
    storage->set_mapping_changed_listener(&_listener);
  }
  
  void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
*** 123,137 ****
    MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
    _bm->clear_range(mr);
  }
  
  void G1CMBitMap::clear_range(MemRegion mr) {
!   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
!   assert(!mr.is_empty(), "unexpected empty region");
    // convert address range into offset range
!   _bm.at_put_range(heapWordToOffset(mr.start()),
!                    heapWordToOffset(mr.end()), false);
  }
  
  G1CMMarkStack::G1CMMarkStack() :
    _max_chunk_capacity(0),
    _base(NULL),
--- 88,104 ----
    MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
    _bm->clear_range(mr);
  }
  
  void G1CMBitMap::clear_range(MemRegion mr) {
!   MemRegion intersection = mr.intersection(_covered);
!   assert(!intersection.is_empty(),
!          "Given range from " PTR_FORMAT " to " PTR_FORMAT " is completely outside the heap",
!          p2i(mr.start()), p2i(mr.end()));
    // convert address range into offset range
!   _bm.at_put_range(addr_to_offset(intersection.start()),
!                    addr_to_offset(intersection.end()), false);
  }
  
  G1CMMarkStack::G1CMMarkStack() :
    _max_chunk_capacity(0),
    _base(NULL),
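
[Note, not part of the webrev] MemRegion::intersection() is const and returns the clipped region by value, so the old clear_range() computed the intersection and then discarded it, clearing the unclipped range; the new code stores the result and clears only the part inside _covered. A hypothetical standalone illustration of the pitfall:

    #include <cassert>
    #include <cstddef>

    // Hypothetical MemRegion-like value type, just to show the pitfall.
    struct Region {
      size_t begin, end;
      // Like MemRegion::intersection(): returns a value, does not mutate *this.
      Region intersection(Region other) const {
        size_t b = begin > other.begin ? begin : other.begin;
        size_t e = end < other.end ? end : other.end;
        return (b < e) ? Region{b, e} : Region{0, 0};
      }
      bool is_empty() const { return begin >= end; }
    };

    void clear_range_old(Region mr, Region covered) {
      mr.intersection(covered);  // BUG: result discarded, mr is unchanged
      // ... would clear the unclipped range here ...
    }

    void clear_range_new(Region mr, Region covered) {
      Region clipped = mr.intersection(covered);  // FIX: keep and use the result
      assert(!clipped.is_empty());
      // ... clear only the clipped range here ...
    }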
*** 436,447 ****
    if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
    }
  
    assert(CGC_lock != NULL, "Where's the CGC_lock?");
-   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
-   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
  
    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
    satb_qs.set_buffer_size(G1SATBBufferSize);
  
    _root_regions.init(_g1h->survivor(), this);
--- 403,412 ----
*** 751,761 ****
    guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
  }
  
  void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
    assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
!   clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
  }
  
  class CheckBitmapClearHRClosure : public HeapRegionClosure {
    G1CMBitMap* _bitmap;
    bool _error;
--- 716,726 ----
    guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
  }
  
  void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
    assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
!   clear_bitmap(_prevMarkBitMap, workers, false);
  }
  
  class CheckBitmapClearHRClosure : public HeapRegionClosure {
    G1CMBitMap* _bitmap;
    bool _error;
*** 767,777 ****
      // This closure can be called concurrently to the mutator, so we must make sure
      // that the result of the getNextMarkedWordAddress() call is compared to the
      // value passed to it as limit to detect any found bits.
      // end never changes in G1.
      HeapWord* end = r->end();
!     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
    }
  };
  
  bool G1ConcurrentMark::nextMarkBitmapIsClear() {
    CheckBitmapClearHRClosure cl(_nextMarkBitMap);
--- 732,742 ----
      // This closure can be called concurrently to the mutator, so we must make sure
      // that the result of the getNextMarkedWordAddress() call is compared to the
      // value passed to it as limit to detect any found bits.
      // end never changes in G1.
      HeapWord* end = r->end();
!     return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
    }
  };
  
  bool G1ConcurrentMark::nextMarkBitmapIsClear() {
    CheckBitmapClearHRClosure cl(_nextMarkBitMap);
*** 787,797 ****
    }
  };
  
  void G1ConcurrentMark::checkpointRootsInitialPre() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-   G1Policy* g1p = g1h->g1_policy();
  
    _has_aborted = false;
  
    // Initialize marking structures. This has to be done in a STW phase.
    reset();
--- 752,761 ----
*** 1762,1774 ****
    }
  }
  
  void G1ConcurrentMark::swapMarkBitMaps() {
!   G1CMBitMapRO* temp = _prevMarkBitMap;
!   _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap;
!   _nextMarkBitMap = (G1CMBitMap*) temp;
  }
  
  // Closure for marking entries in SATB buffers.
  class G1CMSATBBufferClosure : public SATBBufferClosure {
  private:
--- 1726,1738 ----
    }
  }
  
  void G1ConcurrentMark::swapMarkBitMaps() {
!   G1CMBitMap* temp = _prevMarkBitMap;
!   _prevMarkBitMap = _nextMarkBitMap;
!   _nextMarkBitMap = temp;
  }
  
  // Closure for marking entries in SATB buffers.
  class G1CMSATBBufferClosure : public SATBBufferClosure {
  private:
*** 1909,1921 ****
    print_stats();
  }
  
  void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
!   // Note we are overriding the read-only view of the prev map here, via
!   // the cast.
!   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
  }
  
  HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
    // "checkpoint" the finger
--- 1873,1883 ----
    print_stats();
  }
  
  void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
!   _prevMarkBitMap->clear_range(mr);
  }
  
  HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
    // "checkpoint" the finger
*** 2158,2183 ****
                 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
    _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
    _nextMarkBitMap->print_on_error(st, " Next Bits: ");
  }
  
! // Closure for iteration over bitmaps
! class G1CMBitMapClosure : public BitMapClosure {
! private:
!   // the bitmap that is being iterated over
!   G1CMBitMap* _nextMarkBitMap;
!   G1ConcurrentMark* _cm;
!   G1CMTask* _task;
! 
! public:
!   G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
!     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
! 
!   bool do_bit(size_t offset) {
!     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
!     assert(_nextMarkBitMap->isMarked(addr), "invariant");
!     assert( addr < _cm->finger(), "invariant");
      assert(addr >= _task->finger(), "invariant");
  
      // We move that task's local finger along.
      _task->move_finger_to(addr);
--- 2120,2131 ----
                 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
    _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
    _nextMarkBitMap->print_on_error(st, " Next Bits: ");
  }
  
! bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
!   assert(addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");
  
    // We move that task's local finger along.
    _task->move_finger_to(addr);
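
[Note, not part of the webrev] The closure is now defined out of line as G1CMBitMapClosure::do_addr(), taking the marked address directly; presumably the class declaration moved to a header and the bitmap iterator converts bit offsets to addresses before invoking the callback (this file alone does not show that). A hedged sketch of the address-based visitor shape, reusing MarkBitMapModel and HeapWordPtr from the first sketch above:

    // The iterator resolves offsets to addresses, so the visitor sees
    // addresses and may abort by returning false, as do_addr() does via
    // !_task->has_aborted().
    template <typename Visitor>
    bool iterate_marked(const MarkBitMapModel& bm,
                        HeapWordPtr from, HeapWordPtr to, Visitor visit) {
      for (HeapWordPtr addr = bm.get_next_marked_addr(from, to);
           addr != to;
           addr = bm.get_next_marked_addr(addr + ((size_t)1 << bm.shifter), to)) {
        if (!visit(addr)) {
          return false;  // visitor aborted the iteration
        }
      }
      return true;  // walked the whole range
    }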
*** 2187,2198 ****
      _task->drain_global_stack(true);
  
      // if the has_aborted flag has been raised, we need to bail out of
      // the iteration
      return !_task->has_aborted();
!   }
! };
  
  static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
    ReferenceProcessor* result = g1h->ref_processor_cm();
    assert(result != NULL, "CM reference processor should not be NULL");
    return result;
--- 2135,2145 ----
    _task->drain_global_stack(true);
  
    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
! }
  
  static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
    ReferenceProcessor* result = g1h->ref_processor_cm();
    assert(result != NULL, "CM reference processor should not be NULL");
    return result;
*** 2689,2699 ****
    ++_calls;
  
    // Set up the bitmap and oop closures. Anything that uses them is
    // eventually called from this method, so it is OK to allocate these
    // statically.
!   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
    G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
    set_cm_oop_closure(&cm_oop_closure);
  
    if (_cm->has_overflown()) {
      // This can happen if the mark stack overflows during a GC pause
--- 2636,2646 ----
    ++_calls;
  
    // Set up the bitmap and oop closures. Anything that uses them is
    // eventually called from this method, so it is OK to allocate these
    // statically.
!   G1CMBitMapClosure bitmap_closure(this, _cm);
    G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
    set_cm_oop_closure(&cm_oop_closure);
  
    if (_cm->has_overflown()) {
      // This can happen if the mark stack overflows during a GC pause
*** 2745,2758 ****
        // If the iteration is successful, give up the region.
        if (mr.is_empty()) {
          giveup_current_region();
          regular_clock_call();
        } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
!         if (_nextMarkBitMap->isMarked(mr.start())) {
            // The object is marked - apply the closure
!           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
!           bitmap_closure.do_bit(offset);
          }
          // Even if this task aborted while scanning the humongous object
          // we can (and should) give up the current region.
          giveup_current_region();
          regular_clock_call();
--- 2692,2704 ----
        // If the iteration is successful, give up the region.
        if (mr.is_empty()) {
          giveup_current_region();
          regular_clock_call();
        } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
!         if (_nextMarkBitMap->is_marked(mr.start())) {
            // The object is marked - apply the closure
!           bitmap_closure.do_addr(mr.start());
          }
          // Even if this task aborted while scanning the humongous object
          // we can (and should) give up the current region.
          giveup_current_region();
          regular_clock_call();
*** 2770,2784 ****
        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
!       // enough to point to the next possible object header (the
!       // bitmap knows by how much we need to move it as it knows its
!       // granularity).
        assert(_finger < _region_limit, "invariant");
!       HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
--- 2716,2728 ----
        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
!       // enough to point to the next possible object header.
        assert(_finger < _region_limit, "invariant");
!       HeapWord* const new_finger = _finger + ((oop)_finger)->size();
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
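
[Note, not part of the webrev] With the bitmap no longer exposing nextObject(), the aborted-iteration path advances the finger by the size of the object it points at, which works because the finger sits on the header of the object just scanned. A small standalone sketch of that resume step, reusing the HeapWordPtr typedef from the first sketch; obj_size_in_words stands in for what ((oop)_finger)->size() returns:

    // Returns true if the task can resume in this region, false if the
    // region is finished and should be given up.
    bool resume_after_aborted_scan(HeapWordPtr& finger,
                                   size_t obj_size_in_words,
                                   HeapWordPtr region_limit) {
      HeapWordPtr new_finger = finger + obj_size_in_words;  // next possible header
      if (new_finger >= region_limit) {
        return false;  // aborted while scanning the region's last object
      }
      finger = new_finger;  // restart from the next object on resume
      return true;
    }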