
src/hotspot/share/gc/parallel/psParallelCompact.cpp

rev 56967 : [mq]: 8220465-parallel-gc-haoyu-li
rev 56968 : [mq]: 8220465-suggestions

*** 123,137 ****
  ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
  const ParallelCompactData::RegionData::region_sz_t
  ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
  
- const int ParallelCompactData::RegionData::UNUSED = 0;
- const int ParallelCompactData::RegionData::SHADOW = 1;
- const int ParallelCompactData::RegionData::FILLED = 2;
- const int ParallelCompactData::RegionData::FINISH = 3;
- 
  SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
  
  SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
  ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
--- 123,132 ----
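These four plain int constants are dropped here; later hunks in this same webrev refer to named shadow states on RegionData (Shadow, FinishedShadow), so the states appear to live on the region itself now. As a rough, self-contained illustration of what such a per-region shadow state coordinates, here is a minimal sketch; the class, the UnusedShadow/FilledShadow names and the transition helpers are hypothetical, not the actual RegionData API:

#include <atomic>
#include <cassert>

// Hypothetical stand-in for a per-region shadow state. Only Shadow and
// FinishedShadow appear in this webrev; the other names and the exact
// transitions are assumptions for illustration.
class RegionState {
public:
  static const int UnusedShadow   = 0; // region not yet picked for shadow filling
  static const int Shadow         = 1; // being filled into a shadow region
  static const int FilledShadow   = 2; // shadow copy complete, waiting to copy back
  static const int FinishedShadow = 3; // data is in place in the heap region

  bool mark_shadow() {                 // claim the region for the shadow path
    int expected = UnusedShadow;
    return _state.compare_exchange_strong(expected, Shadow);
  }
  void mark_filled()   { _state.store(FilledShadow); }
  void mark_finished() { _state.store(FinishedShadow); }
  int  shadow_state() const { return _state.load(); }

private:
  std::atomic<int> _state{UnusedShadow};
};

int main() {
  RegionState r;
  assert(r.mark_shadow());             // one worker wins the claim
  assert(!r.mark_shadow());            // a second claim must fail
  r.mark_filled();
  r.mark_finished();
  assert(r.shadow_state() == RegionState::FinishedShadow);
  return 0;
}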
*** 1026,1036 ****
  }
  
  void PSParallelCompact::post_compact()
  {
    GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
!   ParCompactionManager::dequeue_shadow_region();
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top(). Must be done after clearing the bitmap and summary data.
--- 1021,1031 ----
  }
  
  void PSParallelCompact::post_compact()
  {
    GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
!   ParCompactionManager::remove_all_shadow_regions();
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top(). Must be done after clearing the bitmap and summary data.
*** 2421,2434 ****
      sd.addr_to_region_idx(sd.region_align_up(new_top));
  
    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
!       if (sd.region(cur)->try_push()) {
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
-       }
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
--- 2416,2429 ----
      sd.addr_to_region_idx(sd.region_align_up(new_top));
  
    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
!       bool try_push = sd.region(cur)->try_push();
!       assert(try_push, "Must succeed at this point.");
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
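The conditional push becomes an unconditional push guarded by an assert: every freshly claimed region is expected to transition successfully at this point, so a failing try_push() would indicate a bug rather than a normal outcome. The surrounding loop hands regions to workers round-robin; a minimal standalone sketch of that distribution pattern, using generic names rather than the HotSpot types, is:

#include <cstddef>
#include <cstdio>
#include <vector>

// Distribute region indices over per-worker stacks in round-robin order,
// mirroring the loop above in simplified form.
static void distribute_round_robin(const std::vector<size_t>& regions,
                                   std::vector<std::vector<size_t>>& worker_stacks) {
  size_t worker_id = 0;
  const size_t workers = worker_stacks.size();
  for (size_t cur : regions) {
    worker_stacks[worker_id].push_back(cur);
    // Assign regions to workers in round-robin fashion.
    if (++worker_id == workers) {
      worker_id = 0;
    }
  }
}

int main() {
  std::vector<size_t> regions = {9, 8, 7, 6, 5, 4};
  std::vector<std::vector<size_t>> stacks(3);
  distribute_round_robin(regions, stacks);
  for (size_t w = 0; w < stacks.size(); ++w) {
    std::printf("worker %zu gets %zu regions\n", w, stacks[w].size());
  }
  return 0;
}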
*** 2667,2677 ****
  // push
  // push
  //
  // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
  TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
!   enqueue_shadow_region();
    prepare_region_draining_tasks(active_gc_threads);
    enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
  
    {
      GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
--- 2662,2672 ----
  // push
  // push
  //
  // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
  TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
!   initialize_shadow_regions();
    prepare_region_draining_tasks(active_gc_threads);
    enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
  
    {
      GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
*** 3137,3147 ****
    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit. Note that interior oop updates were
      // deferred, then copy enough of the object to fill the region.
      region_ptr->set_deferred_obj_addr(closure.destination());
-     status = closure.copy_until_full(); // copies from closure.source()
  
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(cm, dest_addr, region_ptr);
--- 3132,3141 ----
*** 3163,3180 ****
      src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
    } while (true);
  }
  
! void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx) {
    MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
    fill_region(cm, cl, region_idx);
  }
  
! void PSParallelCompact::fill_shadow_region(ParCompactionManager* cm, size_t region_idx) {
!   // Acquire a shadow region at first
    ParallelCompactData& sd = summary_data();
    RegionData* const region_ptr = sd.region(region_idx);
    size_t shadow_region = cm->acquire_shadow_region(region_ptr);
  
    // The zero return value indicates the corresponding heap region is available,
    // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
--- 3157,3175 ----
      src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
    } while (true);
  }
  
! void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
! {
    MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
    fill_region(cm, cl, region_idx);
  }
  
! void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx) {
!   // Acquire a shadow region first
    ParallelCompactData& sd = summary_data();
    RegionData* const region_ptr = sd.region(region_idx);
    size_t shadow_region = cm->acquire_shadow_region(region_ptr);
  
    // The zero return value indicates the corresponding heap region is available,
    // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
*** 3182,3201 ****
    if (shadow_region == 0) {
      MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
      region_ptr->mark_normal();
      return fill_region(cm, cl, region_idx);
    } else {
!     ShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
      return fill_region(cm, cl, region_idx);
    }
  }
  
! void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr) {
    Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
  }
  
! bool PSParallelCompact::steal_shadow_region(ParCompactionManager* cm, size_t &region_idx) {
    size_t record = cm->shadow_record();
    ParallelCompactData& sd = _summary_data;
    size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
    uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
--- 3177,3198 ----
    if (shadow_region == 0) {
      MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
      region_ptr->mark_normal();
      return fill_region(cm, cl, region_idx);
    } else {
!     MoveAndUpdateShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
      return fill_region(cm, cl, region_idx);
    }
  }
  
! void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
! {
    Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
  }
  
! bool PSParallelCompact::steal_shadow_region(ParCompactionManager* cm, size_t &region_idx)
! {
    size_t record = cm->shadow_record();
    ParallelCompactData& sd = _summary_data;
    size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
    uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
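Taken together with the previous hunk, fill_and_update_shadow_region() first asks the compaction manager for a shadow region; a zero result means the destination heap region itself is free, so the ordinary MoveAndUpdateClosure fills it in place, otherwise MoveAndUpdateShadowClosure fills the shadow region and the data is copied back later via copy_back(). A minimal sketch of that acquire-or-fall-back shape, with made-up names, a plain buffer standing in for a shadow region, and the simplifying assumption that the copy-back can run immediately:

#include <cstddef>
#include <cstring>
#include <vector>

// A spare buffer standing in for a shadow region (illustrative only).
struct Buffer {
  std::vector<char> words;
};

// Returns nullptr when the real destination can be written directly
// (the analogue of acquire_shadow_region() returning 0).
static Buffer* acquire_spare_buffer(bool destination_is_free, size_t size) {
  if (destination_is_free) {
    return nullptr;
  }
  Buffer* b = new Buffer();
  b->words.resize(size);
  return b;
}

static void fill(char* dest, size_t size) {
  std::memset(dest, 0xAB, size);    // stand-in for compacting live objects
}

static void copy_back(const Buffer* spare, char* dest, size_t size) {
  std::memcpy(dest, spare->words.data(), size);  // analogue of Copy::aligned_conjoint_words
}

static void fill_region(char* dest, size_t size, bool destination_is_free) {
  Buffer* spare = acquire_spare_buffer(destination_is_free, size);
  if (spare == nullptr) {
    fill(dest, size);                 // normal path: write the heap region directly
  } else {
    fill(spare->words.data(), size);  // shadow path: write the spare buffer
    copy_back(spare, dest, size);     // here immediately; in the GC, once dest is free
    delete spare;
  }
}

int main() {
  std::vector<char> region(64);
  fill_region(region.data(), region.size(), false);
  return region[0] == static_cast<char>(0xAB) ? 0 : 1;
}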
*** 3208,3218 ****
    }
    return false;
  }
  
! void PSParallelCompact::enqueue_shadow_region() {
    const ParallelCompactData& sd = PSParallelCompact::summary_data();
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      SpaceInfo* const space_info = _space_info + id;
      MutableSpace* const space = space_info->space();
--- 3205,3216 ----
    }
    return false;
  }
  
! void PSParallelCompact::initialize_shadow_regions()
! {
    const ParallelCompactData& sd = PSParallelCompact::summary_data();
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      SpaceInfo* const space_info = _space_info + id;
      MutableSpace* const space = space_info->space();
*** 3221,3236 ****
        sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
      const size_t end_region = sd.addr_to_region_idx(sd.region_align_down(space->end()));
  
      for (size_t cur = beg_region + 1; cur < end_region; ++cur) {
!       ParCompactionManager::enqueue_shadow_region(cur);
      }
    }
  }
  
! void PSParallelCompact::initialize_steal_record(uint which) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which);
  
    size_t record = _summary_data.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
    cm->set_shadow_record(record + which);
  }
--- 3219,3235 ----
        sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
      const size_t end_region = sd.addr_to_region_idx(sd.region_align_down(space->end()));
  
      for (size_t cur = beg_region + 1; cur < end_region; ++cur) {
!       ParCompactionManager::add_shadow_region(cur);
      }
    }
  }
  
! void PSParallelCompact::initialize_steal_record(uint which)
! {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which);
  
    size_t record = _summary_data.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
    cm->set_shadow_record(record + which);
  }
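initialize_steal_record() seeds worker `which` with the dense-prefix region index plus `which`, so each GC thread starts its scan of the shared shadow-region pool at a different slot; steal_shadow_region() (earlier hunk) then advances from that record. A small standalone sketch of staggered, strided scanning in that spirit follows; the types and the claiming step are simplified and hypothetical, and a real implementation would claim slots atomically:

#include <cstdio>
#include <vector>

// Each worker starts at base + its id and advances by the number of workers,
// so no two workers begin their scan on the same slot.
struct Worker {
  size_t next;  // analogue of cm->shadow_record()
};

static void initialize_steal_records(std::vector<Worker>& workers, size_t base) {
  for (size_t which = 0; which < workers.size(); ++which) {
    workers[which].next = base + which;
  }
}

// Scan slots [next, limit) with a stride equal to the worker count.
static bool steal(Worker& w, size_t stride, size_t limit,
                  std::vector<bool>& claimed, size_t& out_idx) {
  for (size_t idx = w.next; idx < limit; idx += stride) {
    w.next = idx + stride;          // remember where to resume next time
    if (!claimed[idx]) {
      claimed[idx] = true;          // real code would claim atomically
      out_idx = idx;
      return true;
    }
  }
  return false;
}

int main() {
  const size_t base = 4, limit = 16;
  std::vector<Worker> workers(3);
  std::vector<bool> claimed(limit, false);
  initialize_steal_records(workers, base);

  size_t idx;
  while (steal(workers[0], workers.size(), limit, claimed, idx)) {
    std::printf("worker 0 claimed slot %zu\n", idx);
  }
  return 0;
}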
*** 3335,3345 ****
    update_state(words);
  }
  
  void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
                                             PSParallelCompact::RegionData *region_ptr) {
!   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::FINISH, "Region should be finished");
    region_ptr->set_completed();
  }
  
  ParMarkBitMapClosure::IterationStatus
  MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
--- 3334,3344 ----
    update_state(words);
  }
  
  void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
                                             PSParallelCompact::RegionData *region_ptr) {
!   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::FinishedShadow, "Region should be finished");
    region_ptr->set_completed();
  }
  
  ParMarkBitMapClosure::IterationStatus
  MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
*** 3371,3383 ****
    update_state(words);
    assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
  
! void ShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
!                                     PSParallelCompact::RegionData *region_ptr) {
!   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::SHADOW, "Region should be shadow");
    // Record the shadow region index
    region_ptr->set_shadow_region(_shadow);
    // Mark the shadow region filled
    region_ptr->mark_filled();
    // Try to copy the content of the shadow region back to its corresponding
--- 3370,3382 ----
    update_state(words);
    assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
  
! void MoveAndUpdateShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
!                                                  PSParallelCompact::RegionData *region_ptr) {
!   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::Shadow, "Region should be shadow");
    // Record the shadow region index
    region_ptr->set_shadow_region(_shadow);
    // Mark the shadow region filled
    region_ptr->mark_filled();
    // Try to copy the content of the shadow region back to its corresponding
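On the shadow path, complete_region() records which shadow region holds the data, marks it filled, and then attempts the copy back; the copy can only happen once the destination heap region has itself been emptied, so whichever thread observes both conditions performs it. A simplified two-flag handshake illustrating that idea (not the actual RegionData protocol) is:

#include <atomic>
#include <cstdio>

// Two independent conditions must hold before the copy-back:
//   (1) the shadow region has been filled, and
//   (2) the destination heap region has been fully evacuated.
// Whichever thread sees the other condition already set does the copy;
// the CAS on `copied` ensures it runs at most once.
struct ShadowHandshake {
  std::atomic<bool> filled{false};
  std::atomic<bool> destination_free{false};
  std::atomic<bool> copied{false};

  void on_shadow_filled() {           // called by the filling thread
    filled.store(true);
    try_copy_back();
  }
  void on_destination_freed() {       // called when the heap region is evacuated
    destination_free.store(true);
    try_copy_back();
  }
  void try_copy_back() {
    bool expected = false;
    if (filled.load() && destination_free.load() &&
        copied.compare_exchange_strong(expected, true)) {
      std::printf("copying shadow region back to its heap region\n");
    }
  }
};

int main() {
  ShadowHandshake h;
  h.on_shadow_filled();       // copy cannot happen yet
  h.on_destination_freed();   // now both conditions hold; copy runs exactly once
  return 0;
}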