 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"

// ...

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
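  // Note: max_top is aligned up to a region boundary before the index lookup,
  // so end_region (exclusive) also covers a trailing partial region and the
  // clear_range() below includes it.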
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc. Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);
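  // The 'true' argument also counts this as a full collection, not just a
  // total collection.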

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();

  // ...
  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();
}

void PSParallelCompact::post_compact()
{
  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    // Update top(). Must be done after clearing the bitmap and summary data.
    _space_info[id].publish_new_top();
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();
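  // If eden is not empty, see whether its live data can be absorbed directly
  // into the old gen, in which case eden can still be treated as empty here.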
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.

  // ...
    tty->print_cr("summarizing %d [%s] into %d [%s]: "
                  "src=" PTR_FORMAT "-" PTR_FORMAT " "
                  SIZE_FORMAT "-" SIZE_FORMAT " "
                  "dst=" PTR_FORMAT "-" PTR_FORMAT " "
                  SIZE_FORMAT "-" SIZE_FORMAT,
                  src_space_id, space_names[src_space_id],
                  dst_space_id, space_names[dst_space_id],
                  p2i(src_beg), p2i(src_end),
                  _summary_data.addr_to_region_idx(src_beg),
                  _summary_data.addr_to_region_idx(src_end),
                  p2i(dst_beg), p2i(dst_end),
                  _summary_data.addr_to_region_idx(dst_beg),
                  _summary_data.addr_to_region_idx(dst_end));
  }
}
#endif // #ifndef PRODUCT

void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                      bool maximum_compaction)
{
  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  // trace("2");

#ifdef ASSERT
  if (TraceParallelOldGCMarkingPhase) {
    tty->print_cr("add_obj_count=" SIZE_FORMAT " "
                  "add_obj_bytes=" SIZE_FORMAT,
                  add_obj_count, add_obj_size * HeapWordSize);
    tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
                  "mark_bitmap_bytes=" SIZE_FORMAT,
                  mark_bitmap_count, mark_bitmap_size * HeapWordSize);
  }
#endif // #ifdef ASSERT

  // Quick summarization of each space into itself, to see how much is live.
  summarize_spaces_quick();

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("summary_phase: after summarizing each space to self");
    Universe::print();
    NOT_PRODUCT(print_region_ranges());

  // ...

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                      maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->collector_policy());

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  // ...

  // miscellaneous bookkeeping.
  PreGCValues pre_gc_values;
  pre_compact(&pre_gc_values);

  // Get the compaction manager reserved for the VM thread.
  ParCompactionManager* const vmthread_cm =
    ParCompactionManager::manager_array(gc_task_manager()->workers());
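  // (Index workers() is one past the per-worker managers; that extra slot is
  // the one reserved for the VM thread.)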

  // Place after pre_compact() where the number of invocations is incremented.
  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  {
    ResourceMark rm;
    HandleMark hm;

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(maximum_heap_compaction);
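    // For a maximal compaction, setup_policy(true) installs a policy that
    // clears all soft references; otherwise the default policy is kept.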

    bool marked_for_unloading = false;

    marking_start.update();
    marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);

    // ...

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

GCTaskManager* const PSParallelCompact::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                      bool maximum_heap_compaction,
                                      ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);
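  // The terminator lets the stealing tasks enqueued below detect when all
  // marking work, including stolen work, is globally complete.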

  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    ParallelScavengeHeap::ParStrongRootsScope psrs;

    GCTaskQueue* q = GCTaskQueue::create();

    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
    // We scan the thread roots in parallel.
    Threads::create_thread_roots_marking_tasks(q);
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));

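    // Stealing tasks keep otherwise-idle threads busy: a thread whose own
    // marking stack is empty steals marking work from the other threads
    // until the terminator detects that all work is done.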
    if (active_gc_threads > 1) {
      for (uint j = 0; j < active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }

    gc_task_manager()->execute_and_wait(q);
  }

  // Process reference objects found during marking
  {
    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    ReferenceProcessorStats stats;
    if (ref_processor()->processing_is_mt()) {
      RefProcTaskExecutor task_executor;
      stats = ref_processor()->process_discovered_references(
        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
        &task_executor, &_gc_timer, _gc_tracer.gc_id());
    } else {
      stats = ref_processor()->process_discovered_references(
        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
        &_gc_timer, _gc_tracer.gc_id());
    }

    gc_tracer->report_gc_reference_stats(stats);
  }

  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  // This is the point where the entire marking should have completed.
  assert(cm->marking_stacks_empty(), "Marking should have completed");

  // Follow system dictionary roots and unload classes.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer.report_object_count_after_gc(is_alive_closure());
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSParallelCompact::adjust_roots() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  // Need new claim bits when tracing through and adjusting pointers.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  // Roots were visited so references into the young gen in roots
  // may have been scanned. Process them also.
  // Should the reference processor have a span that excludes
  // young gen objects?
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
}

void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                      uint parallel_gc_threads)
{
  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  // Find the threads that are active
  unsigned int which = 0;

  const uint task_count = MAX2(parallel_gc_threads, 1U);
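  // MAX2 guarantees that at least one draining task is created even when no
  // parallel GC threads are available.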
  for (uint j = 0; j < task_count; j++) {
    q->enqueue(new DrainStacksCompactionTask(j));
    ParCompactionManager::verify_region_list_empty(j);
    // Set the region stack variables to "no region stack" values so that the
    // stealing tasks can recognize threads that still need a region stack,
    // i.e., those that did not get one by executing a draining task.
    ParCompactionManager* cm = ParCompactionManager::manager_array(j);
    cm->set_region_stack(NULL);
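    // (uint)max_uintx serves as the "no region stack index" sentinel.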
    cm->set_region_stack_index((uint)max_uintx);
  }
  ParCompactionManager::reset_recycled_stack_index();

  // Find all regions that are available (can be filled immediately) and
  // distribute them to the thread stacks. The iteration is done in reverse
  // ...
        // Assign regions to tasks in round-robin fashion.
        if (++which == task_count) {
          assert(which <= parallel_gc_threads,
                 "Inconsistent number of workers");
          which = 0;
        }
      }
    }
  }

  if (TraceParallelOldGCCompactionPhase) {
    if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
    gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
  }
}

#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
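// Each thread's share of the dense prefix is over-partitioned by this factor
// so tasks stay small and threads that finish early can pick up remaining
// chunks. A rough worked example (hypothetical numbers): with 4 GC threads
// and a 64-region dense prefix, over-partitioning by 4 yields 16 tasks of 4
// regions each rather than 4 tasks of 16 regions.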

void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                   uint parallel_gc_threads) {
  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  ParallelCompactData& sd = PSParallelCompact::summary_data();

  // Iterate over all the spaces adding tasks for updating
  // regions in the dense prefix. Assume that 1 gc thread
  // will work on opening the gaps and the remaining gc threads
  // will work on the dense prefix.
  unsigned int space_id;
  for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
    HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
    const MutableSpace* const space = _space_info[space_id].space();

    if (dense_prefix_end == space->bottom()) {
      // There is no dense prefix for this space.
      continue;
    }

    // The dense prefix is before this region.
    size_t region_index_end_dense_prefix =
      sd.addr_to_region_idx(dense_prefix_end);

    // ...
        q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                             region_index_start,
                                             region_index_end));
        region_index_start = region_index_end;
      }
    }
    // This gets any part of the dense prefix that did not
    // fit evenly.
    if (region_index_start < region_index_end_dense_prefix) {
      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                           region_index_start,
                                           region_index_end_dense_prefix));
    }
  }
}

void PSParallelCompact::enqueue_region_stealing_tasks(
                                     GCTaskQueue* q,
                                     ParallelTaskTerminator* terminator_ptr,
                                     uint parallel_gc_threads) {
  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  // Once a thread has drained its stack, it should try to steal regions from
  // other threads.
  if (parallel_gc_threads > 1) {
    for (uint j = 0; j < parallel_gc_threads; j++) {
      q->enqueue(new StealRegionCompactionTask(terminator_ptr));
    }
  }
}

#ifdef ASSERT
// Write a histogram of the number of times the block table was filled for a
// region.
void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
{
  if (!TraceParallelOldGCCompactionPhase) return;

  typedef ParallelCompactData::RegionData rd_t;
  ParallelCompactData& sd = summary_data();

  // ...
      size_t histo[5] = { 0, 0, 0, 0, 0 };
      const size_t histo_len = sizeof(histo) / sizeof(size_t);
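      // Buckets 0-3 count regions whose block table was filled exactly that
      // many times; the last bucket aggregates fill counts of 4 or more (via
      // the MIN2 clamp below).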
      const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));

      for (const rd_t* cur = beg; cur < end; ++cur) {
        ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
      }
      out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
      for (size_t i = 0; i < histo_len; ++i) {
        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
                   histo[i], 100.0 * histo[i] / region_cnt);
      }
      out->cr();
    }
  }
}
#endif // #ifdef ASSERT

void PSParallelCompact::compact() {
  // trace("5");
  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  enqueue_region_draining_tasks(q, active_gc_threads);
  enqueue_dense_prefix_tasks(q, active_gc_threads);
  enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
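  // The queue now holds all three kinds of compaction work: region draining,
  // dense prefix updates, and (when multiple threads are active) region
  // stealing.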

  {
    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    gc_task_manager()->execute_and_wait(q);

#ifdef ASSERT
    // Verify that all regions have been processed before the deferred updates.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }

  {
    // Update the deferred objects, if any. Any compaction manager can be used.
    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
    ParCompactionManager* cm = ParCompactionManager::manager_array(0);
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      update_deferred_objects(cm, SpaceId(id));
    }
  }

  DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
}

#ifdef ASSERT
void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Regions between space bottom() and new_top() should be marked as
  // filled and all Regions between new_top() and top() should be available
  // (i.e., should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);

  // ...