src/hotspot/share/gc/parallel/psParallelCompact.cpp

*** 123,132 **** --- 123,137 ----
  ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
  const ParallelCompactData::RegionData::region_sz_t
  ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
  
+ const int ParallelCompactData::RegionData::UNUSED = 0;
+ const int ParallelCompactData::RegionData::SHADOW = 1;
+ const int ParallelCompactData::RegionData::FILLED = 2;
+ const int ParallelCompactData::RegionData::FINISH = 3;
+ 
  SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
  
  SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
  ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
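Editor's note: the four constants above name the states of a per-region shadow protocol that the rest of this patch drives through CAS-style helpers (try_push, try_steal, mark_filled, try_copy, mark_normal). Below is a minimal sketch of how those helpers could map onto the states, assuming a single atomic field; the real RegionData is defined elsewhere and uses HotSpot's Atomic primitives, so everything here is illustrative only.

    #include <atomic>

    // Illustrative only: a guess at the state machine behind the constants
    // above, using std::atomic instead of HotSpot's Atomic::cmpxchg.
    struct ShadowStateSketch {
      static const int UNUSED = 0;  // region not yet claimed by the protocol
      static const int SHADOW = 1;  // stolen; being filled into a shadow region
      static const int FILLED = 2;  // shadow copy done, waiting for copy-back
      static const int FINISH = 3;  // filled in place, or copied back

      std::atomic<int> _shadow_state{UNUSED};

      bool cas(int expected, int desired) {
        return _shadow_state.compare_exchange_strong(expected, desired);
      }
      bool try_push()  { return cas(UNUSED, SHADOW == 1 ? FINISH : FINISH); } // claim for in-place fill
      bool try_steal() { return cas(UNUSED, SHADOW); }  // claim for shadow fill
      void mark_filled() { _shadow_state.store(FILLED); }
      bool try_copy()  { return cas(FILLED, FINISH); }  // win the copy-back race
      void mark_normal() { _shadow_state.store(FINISH); }
    };

These guessed transitions are at least consistent with the assertions later in the patch: MoveAndUpdateClosure::complete_region() asserts FINISH, and ShadowClosure::complete_region() asserts SHADOW.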
*** 1021,1030 **** --- 1026,1036 ----
  }
  
  void PSParallelCompact::post_compact()
  {
    GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
+   ParCompactionManager::dequeue_shadow_region();
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top().  Must be done after clearing the bitmap and summary data.
*** 2415,2426 **** --- 2421,2434 ----
      sd.addr_to_region_idx(sd.region_align_up(new_top));
  
    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
+       if (sd.region(cur)->try_push()) {
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
+       }
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
*** 2596,2609 **** --- 2604,2622 ----
  
    guarantee(cm->region_stack()->is_empty(), "Not empty");
  
    size_t region_index = 0;
+   PSParallelCompact::initialize_steal_record(worker_id);
  
    while (true) {
      if (ParCompactionManager::steal(worker_id, region_index)) {
        PSParallelCompact::fill_and_update_region(cm, region_index);
        cm->drain_region_stacks();
+     } else if (PSParallelCompact::steal_shadow_region(cm, region_index)) {
+       // Keep working with the help of shadow regions
+       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
+       cm->drain_region_stacks();
      } else {
        if (terminator->offer_termination()) {
          break;
        }
        // Go around again.
*** 2654,2663 **** --- 2667,2677 ----
  //     push
  //     push
  //
  // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
  
  TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
+ enqueue_shadow_region();
  prepare_region_draining_tasks(active_gc_threads);
  enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
  
  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
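Editor's note: the enqueue_shadow_region() call added here publishes the shadow-region candidates before any draining or stealing begins; its definition appears further down in this patch. As a worked example of the range it computes (region numbers hypothetical):

    #include <cstddef>

    // Hypothetical numbers: if MAX2(new_top, top) rounds up to region 100 and
    // space->end() rounds down to region 200, the definition below enqueues
    // regions 101..199 as shadow candidates (its loop starts at beg_region + 1).
    size_t count_shadow_candidates(size_t beg_region, size_t end_region) {
      const size_t first = beg_region + 1;
      return end_region > first ? end_region - first : 0;  // 99 for (100, 200)
    }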
*** 2960,2970 **** --- 2974,2992 ----
  
    for (RegionData* cur = beg; cur < end; ++cur) {
      assert(cur->data_size() > 0, "region must have live data");
      cur->decrement_destination_count();
      if (cur < enqueue_end && cur->available() && cur->claim()) {
+       if (cur->try_push()) {
        cm->push_region(sd.region(cur));
+       } else if (cur->try_copy()) {
+         // Try to copy the content of the shadow region back to its corresponding
+         // heap region if the shadow region is filled
+         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
+         cm->release_shadow_region(cur->shadow_region());
+         cur->set_completed();
+       }
      }
    }
  }
  
  size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
*** 3038,3069 ****
    assert(false, "no source region was found");
    return 0;
  }
  
! void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
  {
    typedef ParMarkBitMap::IterationStatus IterationStatus;
    const size_t RegionSize = ParallelCompactData::RegionSize;
    ParMarkBitMap* const bitmap = mark_bitmap();
    ParallelCompactData& sd = summary_data();
    RegionData* const region_ptr = sd.region(region_idx);
  
-   // Get the items needed to construct the closure.
-   HeapWord* dest_addr = sd.region_to_addr(region_idx);
-   SpaceId dest_space_id = space_id(dest_addr);
-   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
-   HeapWord* new_top = _space_info[dest_space_id].new_top();
-   assert(dest_addr < new_top, "sanity");
-   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
- 
    // Get the source region and related info.
    size_t src_region_idx = region_ptr->source_region();
    SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
    HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  
-   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
    closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
  
    // Adjust src_region_idx to prepare for decrementing destination counts (the
    // destination count is not decremented when a region is copied to itself).
    if (src_region_idx == region_idx) {
--- 3060,3083 ----
    assert(false, "no source region was found");
    return 0;
  }
  
! void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
  {
    typedef ParMarkBitMap::IterationStatus IterationStatus;
    const size_t RegionSize = ParallelCompactData::RegionSize;
    ParMarkBitMap* const bitmap = mark_bitmap();
    ParallelCompactData& sd = summary_data();
    RegionData* const region_ptr = sd.region(region_idx);
  
    // Get the source region and related info.
    size_t src_region_idx = region_ptr->source_region();
    SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
    HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  
+   HeapWord* dest_addr = sd.region_to_addr(region_idx);
    closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
  
    // Adjust src_region_idx to prepare for decrementing destination counts (the
    // destination count is not decremented when a region is copied to itself).
    if (src_region_idx == region_idx) {
*** 3078,3088 ****
      closure.copy_partial_obj();
      if (closure.is_full()) {
        decrement_destination_counts(cm, src_space_id, src_region_idx,
                                     closure.source());
        region_ptr->set_deferred_obj_addr(NULL);
!       region_ptr->set_completed();
        return;
      }
  
      HeapWord* const end_addr = sd.region_align_down(closure.source());
      if (sd.region_align_down(old_src_addr) != end_addr) {
--- 3092,3102 ----
      closure.copy_partial_obj();
      if (closure.is_full()) {
        decrement_destination_counts(cm, src_space_id, src_region_idx,
                                     closure.source());
        region_ptr->set_deferred_obj_addr(NULL);
!       closure.complete_region(cm, dest_addr, region_ptr);
        return;
      }
  
      HeapWord* const end_addr = sd.region_align_down(closure.source());
      if (sd.region_align_down(old_src_addr) != end_addr) {
*** 3123,3145 ****
    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit.  Note that interior oop updates were
      // deferred, then copy enough of the object to fill the region.
      region_ptr->set_deferred_obj_addr(closure.destination());
      status = closure.copy_until_full(); // copies from closure.source()
  
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
!     region_ptr->set_completed();
      return;
    }
  
    if (status == ParMarkBitMap::full) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
!     region_ptr->set_completed();
      return;
    }
  
    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
--- 3137,3160 ----
    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit.  Note that interior oop updates were
      // deferred, then copy enough of the object to fill the region.
      region_ptr->set_deferred_obj_addr(closure.destination());
+ 
      status = closure.copy_until_full(); // copies from closure.source()
  
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
!     closure.complete_region(cm, dest_addr, region_ptr);
      return;
    }
  
    if (status == ParMarkBitMap::full) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
!     closure.complete_region(cm, dest_addr, region_ptr);
      return;
    }
  
    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
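Editor's note: both completion paths above now go through closure.complete_region() instead of calling region_ptr->set_completed() directly, so the same filling loop serves normal and shadow fills. A bare sketch of the dispatch this relies on (simplified signatures; the real classes derive from ParMarkBitMapClosure and take the arguments shown in the definitions further down):

    // Simplified: only the virtual hook matters here.
    struct MoveAndUpdateClosureSketch {
      virtual ~MoveAndUpdateClosureSketch() {}
      // Normal fill: publish the heap region as completed.
      virtual void complete_region() { /* region_ptr->set_completed() */ }
    };

    struct ShadowClosureSketch : public MoveAndUpdateClosureSketch {
      // Shadow fill: mark the shadow copy filled, then maybe copy it back.
      virtual void complete_region() { /* mark_filled(); try_copy(); ... */ }
    };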
*** 3148,3157 **** --- 3163,3242 ----
      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                       end_addr);
    } while (true);
  }
  
+ void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx) {
+   MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
+   fill_region(cm, cl, region_idx);
+ }
+ 
+ void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
+ {
+   // Acquire a shadow region at first
+   ParallelCompactData& sd = summary_data();
+   RegionData* const region_ptr = sd.region(region_idx);
+   size_t shadow_region = cm->acquire_shadow_region(region_ptr);
+   // The zero return value indicates the corresponding heap region is available,
+   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
+   // ShadowClosure to fill the acquired shadow region.
+   if (shadow_region == 0) {
+     MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
+     region_ptr->mark_normal();
+     return fill_region(cm, cl, region_idx);
+   } else {
+     ShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
+     return fill_region(cm, cl, region_idx);
+   }
+ }
+ 
+ void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr) {
+   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
+ }
+ 
+ bool PSParallelCompact::steal_shadow_region(ParCompactionManager* cm, size_t &region_idx) {
+   size_t record = cm->shadow_record();
+   ParallelCompactData& sd = _summary_data;
+   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
+   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
+ 
+   while (record < old_new_top) {
+     if (sd.region(record)->try_steal()) {
+       region_idx = record;
+       return true;
+     }
+     record = cm->next_shadow_record(active_gc_threads);
+   }
+ 
+   return false;
+ }
+ 
+ void PSParallelCompact::enqueue_shadow_region() {
+   const ParallelCompactData& sd = PSParallelCompact::summary_data();
+ 
+   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
+     SpaceInfo* const space_info = _space_info + id;
+     MutableSpace* const space = space_info->space();
+ 
+     const size_t beg_region =
+       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
+     const size_t end_region =
+       sd.addr_to_region_idx(sd.region_align_down(space->end()));
+ 
+     for (size_t cur = beg_region + 1; cur < end_region; ++cur) {
+       ParCompactionManager::enqueue_shadow_region(cur);
+     }
+   }
+ }
+ 
+ void PSParallelCompact::initialize_steal_record(uint which) {
+   ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which);
+ 
+   size_t record = _summary_data.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
+   cm->set_shadow_record(record + which);
+ }
+ 
  void PSParallelCompact::fill_blocks(size_t region_idx)
  {
    // Fill in the block table elements for the specified region.  Each block
    // table element holds the number of live words in the region that are to the
    // left of the first object that starts in the block.  Thus only blocks in
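Editor's note: the shadow-fill entry point above originally read fill_shadow_region, which does not match the fill_and_update_shadow_region call added in the steal loop earlier in this patch; the definition is renamed here so the two agree. Separately, initialize_steal_record() and steal_shadow_region() make each worker scan its own arithmetic stride of region records, so stealers do not contend on the same candidates. A worked sketch of the stride (the exact bookkeeping inside next_shadow_record() is an assumption):

    #include <cstddef>

    // Worker `which` starts at dense_prefix_region + which and advances by the
    // active worker count: with 4 workers and a dense prefix ending at region
    // 100, worker 2 visits 102, 106, 110, ... until it passes old-space new_top.
    size_t first_shadow_record(size_t dense_prefix_region, unsigned which) {
      return dense_prefix_region + which;
    }
    size_t next_shadow_record_sketch(size_t record, unsigned active_gc_threads) {
      return record + active_gc_threads;
    }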
*** 3220,3232 ****
    _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  }
  
  ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
  {
!   if (source() != destination()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
    }
    update_state(words_remaining());
    assert(is_full(), "sanity");
    return ParMarkBitMap::full;
  }
--- 3305,3317 ----
    _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  }
  
  ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
  {
!   if (source() != copy_destination()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), copy_destination(), words_remaining());
    }
    update_state(words_remaining());
    assert(is_full(), "sanity");
    return ParMarkBitMap::full;
  }
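Editor's note: from here on the closure distinguishes destination(), the address the object will logically occupy in the heap (still used for bookkeeping such as check_new_location and the start array), from copy_destination(), the address the bytes are actually written to. A minimal sketch of the split, assuming the shadow variant keeps a fixed offset between the heap region and its shadow (the field name _offset is an assumption, not from this patch):

    #include <cstddef>

    struct DestinationSketch {
      char*     _destination;  // where the object logically lands in the heap
      ptrdiff_t _offset;       // shadow_base - region_base for a shadow fill,
                               // 0 for a normal fill
      char* destination()      const { return _destination; }
      char* copy_destination() const { return _destination + _offset; }
    };

For a normal fill the two addresses coincide, which is why the unchanged MoveAndUpdateClosure paths behave exactly as before.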
*** 3241,3257 ****
      words = bitmap()->obj_size(source(), end_addr);
    }
  
    // This test is necessary; if omitted, the pointer updates to a partial object
    // that crosses the dense prefix boundary could be overwritten.
!   if (source() != destination()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), destination(), words);
    }
    update_state(words);
  }
  
  ParMarkBitMapClosure::IterationStatus
  MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
    assert(destination() != NULL, "sanity");
    assert(bitmap()->obj_size(addr) == words, "bad size");
--- 3326,3348 ----
      words = bitmap()->obj_size(source(), end_addr);
    }
  
    // This test is necessary; if omitted, the pointer updates to a partial object
    // that crosses the dense prefix boundary could be overwritten.
!   if (source() != copy_destination()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), copy_destination(), words);
    }
    update_state(words);
  }
  
+ void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
+                                            PSParallelCompact::RegionData *region_ptr) {
+   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::FINISH, "Region should be finished");
+   region_ptr->set_completed();
+ }
+ 
  ParMarkBitMapClosure::IterationStatus
  MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
    assert(destination() != NULL, "sanity");
    assert(bitmap()->obj_size(addr) == words, "bad size");
*** 3266,3289 ****
    // The start_array must be updated even if the object is not moving.
    if (_start_array != NULL) {
      _start_array->allocate_block(destination());
    }
  
!   if (destination() != source()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), destination(), words);
    }
  
!   oop moved_oop = (oop) destination();
    compaction_manager()->update_contents(moved_oop);
  
    assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
  
    update_state(words);
!   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
  
  UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                       ParCompactionManager* cm,
                                       PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(mbm, cm),
    _space_id(space_id),
--- 3357,3396 ----
    // The start_array must be updated even if the object is not moving.
    if (_start_array != NULL) {
      _start_array->allocate_block(destination());
    }
  
!   if (copy_destination() != source()) {
      DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
!     Copy::aligned_conjoint_words(source(), copy_destination(), words);
    }
  
!   oop moved_oop = (oop) copy_destination();
    compaction_manager()->update_contents(moved_oop);
  
    assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
  
    update_state(words);
!   assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
  
+ void ShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
+                                     PSParallelCompact::RegionData *region_ptr) {
+   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::SHADOW, "Region should be shadow");
+   // Record the shadow region index
+   region_ptr->set_shadow_region(_shadow);
+   // Mark the shadow region filled
+   region_ptr->mark_filled();
+   // Try to copy the content of the shadow region back to its corresponding
+   // heap region if available
+   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->try_copy()) {
+     region_ptr->set_completed();
+     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
+     cm->release_shadow_region(_shadow);
+   }
+ }
+ 
  UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                       ParCompactionManager* cm,
                                       PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(mbm, cm),
    _space_id(space_id),
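Editor's note: ShadowClosure::complete_region() above is one half of a handshake; the other half is the try_copy() branch added to decrement_destination_counts() earlier in this patch. Whichever side wins the FILLED-to-FINISH transition performs the copy-back, so it happens exactly once per region. A self-contained sketch of that race (illustrative, using std::atomic in place of the real RegionData):

    #include <atomic>

    static const int FILLED_STATE = 2;
    static const int FINISH_STATE = 3;

    // Exactly one caller can move the state from FILLED to FINISH.
    bool try_copy_sketch(std::atomic<int>& shadow_state) {
      int expected = FILLED_STATE;
      return shadow_state.compare_exchange_strong(expected, FINISH_STATE);
    }

    // Shadow filler: publish FILLED, then copy back immediately if the
    // destination heap region is already free.
    void filler_side(std::atomic<int>& s, bool destination_free) {
      s.store(FILLED_STATE);                      // mark_filled()
      if (destination_free && try_copy_sketch(s)) {
        /* copy_back(shadow, region); release the shadow region */
      }
    }

    // Destination-count decrementer: once the destination becomes free, copy
    // back if the shadow copy is already FILLED.
    void decrementer_side(std::atomic<int>& s) {
      if (try_copy_sketch(s)) {
        /* copy_back(shadow, region); release the shadow region */
      }
    }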