src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
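In outline, the tracing hooks added in this revision follow one pattern: a static STWGCTimer (_gc_timer) collects the phase timings, each GCTraceTime scope being handed &_gc_timer, while a static ParallelOldTracer (_gc_tracer) turns the collected data into events. A condensed sketch of that sequence, assembled only from calls that appear in the new version of invoke_no_policy() and marking_phase() below (not a standalone program; the elided parts are the existing collection code):

    _gc_timer.register_gc_start(os::elapsed_counter());
    _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

    {
      GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
      // ... marking work; reference processing reports into the tracer:
      // gc_tracer->report_gc_reference_stats(stats);
    }
    // ... summary, adjust-roots and compaction phases use the same
    // GCTraceTime(..., &_gc_timer) pattern.

    _gc_timer.register_gc_end(os::elapsed_counter());
    _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
    _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());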


  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
  30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
  31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
  32 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
  33 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
  34 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
  35 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
  36 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
  37 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
  38 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
  39 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  40 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
  41 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"




  42 #include "gc_implementation/shared/isGCActiveMark.hpp"
  43 #include "gc_interface/gcCause.hpp"
  44 #include "memory/gcLocker.inline.hpp"
  45 #include "memory/referencePolicy.hpp"
  46 #include "memory/referenceProcessor.hpp"
  47 #include "oops/methodData.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "oops/oop.pcgc.inline.hpp"
  50 #include "runtime/fprofiler.hpp"
  51 #include "runtime/safepoint.hpp"
  52 #include "runtime/vmThread.hpp"
  53 #include "services/management.hpp"
  54 #include "services/memoryService.hpp"
  55 #include "services/memTracker.hpp"
  56 #include "utilities/events.hpp"
  57 #include "utilities/stack.inline.hpp"
  58 
  59 #include <math.h>
  60 
  61 // All sizes are in HeapWords.


 755   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 756   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 757   for (const size_t* p = beg; p < end; ++p) {
 758     assert(*p == 0, "not zero");
 759   }
 760 }
 761 
 762 void ParallelCompactData::verify_clear()
 763 {
 764   verify_clear(_region_vspace);
 765 }
 766 #endif  // #ifdef ASSERT
 767 
 768 #ifdef NOT_PRODUCT
 769 ParallelCompactData::RegionData* debug_region(size_t region_index) {
 770   ParallelCompactData& sd = PSParallelCompact::summary_data();
 771   return sd.region(region_index);
 772 }
 773 #endif
 774 


 775 elapsedTimer        PSParallelCompact::_accumulated_time;
 776 unsigned int        PSParallelCompact::_total_invocations = 0;
 777 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 778 jlong               PSParallelCompact::_time_of_last_gc = 0;
 779 CollectorCounters*  PSParallelCompact::_counters = NULL;
 780 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 781 ParallelCompactData PSParallelCompact::_summary_data;
 782 
 783 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 784 
 785 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 786 
 787 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 788 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 789 
 790 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 791 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 792 
 793 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
 794 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }


 928 
 929   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 930   const size_t end_region =
 931     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 932   _summary_data.clear_range(beg_region, end_region);
 933 
 934   // Clear the data used to 'split' regions.
 935   SplitInfo& split_info = _space_info[id].split_info();
 936   if (split_info.is_valid()) {
 937     split_info.clear();
 938   }
 939   DEBUG_ONLY(split_info.verify_clear();)
 940 }
 941 
 942 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 943 {
 944   // Update the from & to space pointers in space_info, since they are swapped
 945   // at each young gen gc.  Do the update unconditionally (even though a
 946   // promotion failure does not swap spaces) because an unknown number of minor
 947   // collections will have swapped the spaces an unknown number of times.
 948   TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
 949   ParallelScavengeHeap* heap = gc_heap();
 950   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 951   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 952 
 953   pre_gc_values->fill(heap);
 954 
 955   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 956   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 957 
 958   // Increment the invocation count
 959   heap->increment_total_collections(true);
 960 
 961   // We need to track unique mark sweep invocations as well.
 962   _total_invocations++;
 963 
 964   heap->print_heap_before_gc();

 965 
 966   // Fill in TLABs
 967   heap->accumulate_statistics_all_tlabs();
 968   heap->ensure_parsability(true);  // retire TLABs
 969 
 970   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 971     HandleMark hm;  // Discard invalid handles created during verification
 972     Universe::verify(" VerifyBeforeGC:");
 973   }
 974 
 975   // Verify object start arrays
 976   if (VerifyObjectStartArray &&
 977       VerifyBeforeGC) {
 978     heap->old_gen()->verify_object_start_array();
 979   }
 980 
 981   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 982   DEBUG_ONLY(summary_data().verify_clear();)
 983 
 984   // Have worker threads release resources the next time they run a task.
 985   gc_task_manager()->release_all_resources();
 986 }
 987 
 988 void PSParallelCompact::post_compact()
 989 {
 990   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
 991 
 992   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 993     // Clear the marking bitmap, summary data and split info.
 994     clear_data_covering_space(SpaceId(id));
 995     // Update top().  Must be done after clearing the bitmap and summary data.
 996     _space_info[id].publish_new_top();
 997   }
 998 
 999   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1000   MutableSpace* const from_space = _space_info[from_space_id].space();
1001   MutableSpace* const to_space   = _space_info[to_space_id].space();
1002 
1003   ParallelScavengeHeap* heap = gc_heap();
1004   bool eden_empty = eden_space->is_empty();
1005   if (!eden_empty) {
1006     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1007                                             heap->young_gen(), heap->old_gen());
1008   }
1009 
1010   // Update heap occupancy information which is used as input to the soft ref


1796     tty->print_cr("summarizing %d [%s] into %d [%s]:  "
1797                   "src=" PTR_FORMAT "-" PTR_FORMAT " "
1798                   SIZE_FORMAT "-" SIZE_FORMAT " "
1799                   "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1800                   SIZE_FORMAT "-" SIZE_FORMAT,
1801                   src_space_id, space_names[src_space_id],
1802                   dst_space_id, space_names[dst_space_id],
1803                   src_beg, src_end,
1804                   _summary_data.addr_to_region_idx(src_beg),
1805                   _summary_data.addr_to_region_idx(src_end),
1806                   dst_beg, dst_end,
1807                   _summary_data.addr_to_region_idx(dst_beg),
1808                   _summary_data.addr_to_region_idx(dst_end));
1809   }
1810 }
1811 #endif  // #ifndef PRODUCT
1812 
1813 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1814                                       bool maximum_compaction)
1815 {
1816   TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1817   // trace("2");
1818 
1819 #ifdef  ASSERT
1820   if (TraceParallelOldGCMarkingPhase) {
1821     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1822                   "add_obj_bytes=" SIZE_FORMAT,
1823                   add_obj_count, add_obj_size * HeapWordSize);
1824     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1825                   "mark_bitmap_bytes=" SIZE_FORMAT,
1826                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1827   }
1828 #endif  // #ifdef ASSERT
1829 
1830   // Quick summarization of each space into itself, to see how much is live.
1831   summarize_spaces_quick();
1832 
1833   if (TraceParallelOldGCSummaryPhase) {
1834     tty->print_cr("summary_phase:  after summarizing each space to self");
1835     Universe::print();
1836     NOT_PRODUCT(print_region_ranges());


1959 
1960   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1961                                       maximum_heap_compaction);
1962 }
1963 
1964 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1965   size_t addr_region_index = addr_to_region_idx(addr);
1966   return region_index == addr_region_index;
1967 }
1968 
1969 // This method contains no policy. You should probably
1970 // be calling invoke() instead.
1971 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1972   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1973   assert(ref_processor() != NULL, "Sanity");
1974 
1975   if (GC_locker::check_active_before_gc()) {
1976     return false;
1977   }
1978 





1979   TimeStamp marking_start;
1980   TimeStamp compaction_start;
1981   TimeStamp collection_exit;
1982 
1983   ParallelScavengeHeap* heap = gc_heap();
1984   GCCause::Cause gc_cause = heap->gc_cause();
1985   PSYoungGen* young_gen = heap->young_gen();
1986   PSOldGen* old_gen = heap->old_gen();
1987   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1988 
1989   // The scope of casr should end after code that can change
1990   // CollectorPolicy::_should_clear_all_soft_refs.
1991   ClearedAllSoftRefs casr(maximum_heap_compaction,
1992                           heap->collector_policy());
1993 
1994   if (ZapUnusedHeapArea) {
1995     // Save information needed to minimize mangling
1996     heap->record_gen_tops_before_GC();
1997   }
1998 
1999   heap->pre_full_gc_dump();
2000 
2001   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
2002 
2003   // Make sure data structures are sane, make the heap parsable, and do other
2004   // miscellaneous bookkeeping.
2005   PreGCValues pre_gc_values;
2006   pre_compact(&pre_gc_values);
2007 
2008   // Get the compaction manager reserved for the VM thread.
2009   ParCompactionManager* const vmthread_cm =
2010     ParCompactionManager::manager_array(gc_task_manager()->workers());
2011 
2012   // Place after pre_compact() where the number of invocations is incremented.
2013   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
2014 
2015   {
2016     ResourceMark rm;
2017     HandleMark hm;
2018 
2019     // Set the number of GC threads to be used in this collection
2020     gc_task_manager()->set_active_gang();
2021     gc_task_manager()->task_idle_workers();
2022     heap->set_par_threads(gc_task_manager()->active_workers());
2023 
2024     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2025     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2026     TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
2027     TraceCollectorStats tcs(counters());
 2028     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
2029 
2030     if (TraceGen1Time) accumulated_time()->start();
2031 
2032     // Let the size policy know we're starting
2033     size_policy->major_collection_begin();
2034 
2035     CodeCache::gc_prologue();
2036     Threads::gc_prologue();
2037 
2038     COMPILER2_PRESENT(DerivedPointerTable::clear());
2039 
2040     ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
2041     ref_processor()->setup_policy(maximum_heap_compaction);
2042 
2043     bool marked_for_unloading = false;
2044 
2045     marking_start.update();
2046     marking_phase(vmthread_cm, maximum_heap_compaction);
2047 
2048     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
2049       && gc_cause == GCCause::_java_lang_system_gc;
2050     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2051 
2052     COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2053     COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2054 
2055     // adjust_roots() updates Universe::_intArrayKlassObj which is
2056     // needed by the compaction for filling holes in the dense prefix.
2057     adjust_roots();
2058 
2059     compaction_start.update();
2060     compact();
2061 
2062     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
2063     // done before resizing.
2064     post_compact();
2065 
2066     // Let the size policy know we're done


2179   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2180     HandleMark hm;  // Discard invalid handles created during verification
2181     Universe::verify(" VerifyAfterGC:");
2182   }
2183 
2184   // Re-verify object start arrays
2185   if (VerifyObjectStartArray &&
2186       VerifyAfterGC) {
2187     old_gen->verify_object_start_array();
2188   }
2189 
2190   if (ZapUnusedHeapArea) {
2191     old_gen->object_space()->check_mangled_unused_area_complete();
2192   }
2193 
2194   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2195 
2196   collection_exit.update();
2197 
2198   heap->print_heap_after_gc();


2199   if (PrintGCTaskTimeStamps) {
2200     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2201                            INT64_FORMAT,
2202                            marking_start.ticks(), compaction_start.ticks(),
2203                            collection_exit.ticks());
2204     gc_task_manager()->print_task_time_stamps();
2205   }
2206 
2207   heap->post_full_gc_dump();
2208 
2209 #ifdef TRACESPINNING
2210   ParallelTaskTerminator::print_termination_counts();
2211 #endif
2212 





2213   return true;
2214 }
2215 
2216 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2217                                              PSYoungGen* young_gen,
2218                                              PSOldGen* old_gen) {
2219   MutableSpace* const eden_space = young_gen->eden_space();
2220   assert(!eden_space->is_empty(), "eden must be non-empty");
2221   assert(young_gen->virtual_space()->alignment() ==
2222          old_gen->virtual_space()->alignment(), "alignments do not match");
2223 
2224   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2225     return false;
2226   }
2227 
2228   // Both generations must be completely committed.
2229   if (young_gen->virtual_space()->uncommitted_size() != 0) {
2230     return false;
2231   }
2232   if (old_gen->virtual_space()->uncommitted_size() != 0) {


2291   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2292     start_array->allocate_block(p);
2293   }
2294 
2295   // Could update the promoted average here, but it is not typically updated at
2296   // full GCs and the value to use is unclear.  Something like
2297   //
2298   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2299 
2300   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2301   return true;
2302 }
2303 
2304 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2305   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2306     "shouldn't return NULL");
2307   return ParallelScavengeHeap::gc_task_manager();
2308 }
2309 
2310 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2311                                       bool maximum_heap_compaction) {

2312   // Recursively traverse all live objects and mark them
2313   TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2314 
2315   ParallelScavengeHeap* heap = gc_heap();
2316   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2317   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2318   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2319   ParallelTaskTerminator terminator(active_gc_threads, qset);
2320 
2321   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2322   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2323 
2324   // Need new claim bits before marking starts.
2325   ClassLoaderDataGraph::clear_claimed_marks();
2326 
2327   {
2328     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);

2329     ParallelScavengeHeap::ParStrongRootsScope psrs;
2330 
2331     GCTaskQueue* q = GCTaskQueue::create();
2332 
2333     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2334     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2335     // We scan the thread roots in parallel
2336     Threads::create_thread_roots_marking_tasks(q);
2337     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2338     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2339     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2340     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2341     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2342     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2343     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2344 
2345     if (active_gc_threads > 1) {
2346       for (uint j = 0; j < active_gc_threads; j++) {
2347         q->enqueue(new StealMarkingTask(&terminator));
2348       }
2349     }
2350 
2351     gc_task_manager()->execute_and_wait(q);
2352   }
2353 
2354   // Process reference objects found during marking
2355   {
2356     TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);


2357     if (ref_processor()->processing_is_mt()) {
2358       RefProcTaskExecutor task_executor;
2359       ref_processor()->process_discovered_references(
2360         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2361         &task_executor);
2362     } else {
2363       ref_processor()->process_discovered_references(
2364         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);

2365     }


2366   }
2367 
2368   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
2369 
2370   // This is the point where the entire marking should have completed.
2371   assert(cm->marking_stacks_empty(), "Marking should have completed");
2372 
2373   // Follow system dictionary roots and unload classes.
2374   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2375 
2376   // Unload nmethods.
2377   CodeCache::do_unloading(is_alive_closure(), purged_class);
2378 
2379   // Prune dead klasses from subklass/sibling/implementor lists.
2380   Klass::clean_weak_klass_links(is_alive_closure());
2381 
2382   // Delete entries for dead interned strings.
2383   StringTable::unlink(is_alive_closure());
2384 
2385   // Clean up unreferenced symbols in symbol table.
2386   SymbolTable::unlink();

2387 }
2388 
2389 void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
2390   ClassLoaderData* cld = klass->class_loader_data();
2391   // The actual processing of the klass is done when we
2392   // traverse the list of Klasses in the class loader data.
2393   PSParallelCompact::follow_class_loader(cm, cld);
2394 }
2395 
2396 void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
2397   ClassLoaderData* cld = klass->class_loader_data();
2398   // The actual processing of the klass is done when we
2399   // traverse the list of Klasses in the class loader data.
2400   PSParallelCompact::adjust_class_loader(cm, cld);
2401 }
2402 
2403 void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
2404                                             ClassLoaderData* cld) {
2405   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2406   PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
2407 
2408   cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
2409 }
2410 
2411 void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
2412                                             ClassLoaderData* cld) {
2413   cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
2414                PSParallelCompact::adjust_klass_closure(),
2415                true);
2416 }
2417 
2418 // This should be moved to the shared markSweep code!
2419 class PSAlwaysTrueClosure: public BoolObjectClosure {
2420 public:
2421   bool do_object_b(oop p) { return true; }
2422 };
2423 static PSAlwaysTrueClosure always_true;
2424 
2425 void PSParallelCompact::adjust_roots() {
2426   // Adjust the pointers to reflect the new locations
2427   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
2428 
2429   // Need new claim bits when tracing through and adjusting pointers.
2430   ClassLoaderDataGraph::clear_claimed_marks();
2431 
2432   // General strong roots.
2433   Universe::oops_do(adjust_pointer_closure());
2434   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
2435   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2436   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2437   ObjectSynchronizer::oops_do(adjust_pointer_closure());
2438   FlatProfiler::oops_do(adjust_pointer_closure());
2439   Management::oops_do(adjust_pointer_closure());
2440   JvmtiExport::oops_do(adjust_pointer_closure());
2441   // SO_AllClasses
2442   SystemDictionary::oops_do(adjust_pointer_closure());
2443   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2444 
2445   // Now adjust pointers in remaining weak roots.  (All of which should
2446   // have been cleared if they pointed to non-surviving objects.)
2447   // Global (weak) JNI handles
2448   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
2449 
2450   CodeCache::oops_do(adjust_pointer_closure());
2451   StringTable::oops_do(adjust_pointer_closure());
2452   ref_processor()->weak_oops_do(adjust_pointer_closure());
2453   // Roots were visited so references into the young gen in roots
2454   // may have been scanned.  Process them also.
2455   // Should the reference processor have a span that excludes
2456   // young gen objects?
2457   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2458 }
2459 
2460 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2461                                                       uint parallel_gc_threads)
2462 {
2463   TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2464 
2465   // Find the threads that are active
2466   unsigned int which = 0;
2467 
2468   const uint task_count = MAX2(parallel_gc_threads, 1U);
2469   for (uint j = 0; j < task_count; j++) {
2470     q->enqueue(new DrainStacksCompactionTask(j));
2471     ParCompactionManager::verify_region_list_empty(j);
 2472     // Set the region stack variables to "no" region stack values
 2473     // so that they will be recognized as needing a region stack
2474     // in the stealing tasks if they do not get one by executing
2475     // a draining stack.
2476     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2477     cm->set_region_stack(NULL);
2478     cm->set_region_stack_index((uint)max_uintx);
2479   }
2480   ParCompactionManager::reset_recycled_stack_index();
2481 
2482   // Find all regions that are available (can be filled immediately) and
2483   // distribute them to the thread stacks.  The iteration is done in reverse


2517         // Assign regions to tasks in round-robin fashion.
2518         if (++which == task_count) {
2519           assert(which <= parallel_gc_threads,
2520             "Inconsistent number of workers");
2521           which = 0;
2522         }
2523       }
2524     }
2525   }
2526 
2527   if (TraceParallelOldGCCompactionPhase) {
2528     if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2529     gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2530   }
2531 }
2532 
2533 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2534 
2535 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2536                                                     uint parallel_gc_threads) {
2537   TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2538 
2539   ParallelCompactData& sd = PSParallelCompact::summary_data();
2540 
2541   // Iterate over all the spaces adding tasks for updating
2542   // regions in the dense prefix.  Assume that 1 gc thread
2543   // will work on opening the gaps and the remaining gc threads
2544   // will work on the dense prefix.
2545   unsigned int space_id;
2546   for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2547     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2548     const MutableSpace* const space = _space_info[space_id].space();
2549 
2550     if (dense_prefix_end == space->bottom()) {
2551       // There is no dense prefix for this space.
2552       continue;
2553     }
2554 
2555     // The dense prefix is before this region.
2556     size_t region_index_end_dense_prefix =
2557         sd.addr_to_region_idx(dense_prefix_end);


2599         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2600                                              region_index_start,
2601                                              region_index_end));
2602         region_index_start = region_index_end;
2603       }
2604     }
2605     // This gets any part of the dense prefix that did not
2606     // fit evenly.
2607     if (region_index_start < region_index_end_dense_prefix) {
2608       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2609                                            region_index_start,
2610                                            region_index_end_dense_prefix));
2611     }
2612   }
2613 }
2614 
2615 void PSParallelCompact::enqueue_region_stealing_tasks(
2616                                      GCTaskQueue* q,
2617                                      ParallelTaskTerminator* terminator_ptr,
2618                                      uint parallel_gc_threads) {
2619   TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2620 
 2621   // Once a thread has drained its stack, it should try to steal regions from
2622   // other threads.
2623   if (parallel_gc_threads > 1) {
2624     for (uint j = 0; j < parallel_gc_threads; j++) {
2625       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2626     }
2627   }
2628 }
2629 
2630 void PSParallelCompact::compact() {
2631   // trace("5");
2632   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2633 
2634   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2635   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2636   PSOldGen* old_gen = heap->old_gen();
2637   old_gen->start_array()->reset();
2638   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2639   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2640   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2641   ParallelTaskTerminator terminator(active_gc_threads, qset);
2642 
2643   GCTaskQueue* q = GCTaskQueue::create();
2644   enqueue_region_draining_tasks(q, active_gc_threads);
2645   enqueue_dense_prefix_tasks(q, active_gc_threads);
2646   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2647 
2648   {
2649     TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2650 
2651     gc_task_manager()->execute_and_wait(q);
2652 
2653 #ifdef  ASSERT
2654     // Verify that all regions have been processed before the deferred updates.
2655     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2656       verify_complete(SpaceId(id));
2657     }
2658 #endif
2659   }
2660 
2661   {
2662     // Update the deferred objects, if any.  Any compaction manager can be used.
2663     TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2664     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2665     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2666       update_deferred_objects(cm, SpaceId(id));
2667     }
2668   }
2669 }
2670 
2671 #ifdef  ASSERT
2672 void PSParallelCompact::verify_complete(SpaceId space_id) {
 2673   // All Regions between space bottom() and new_top() should be marked as filled
2674   // and all Regions between new_top() and top() should be available (i.e.,
2675   // should have been emptied).
2676   ParallelCompactData& sd = summary_data();
2677   SpaceInfo si = _space_info[space_id];
2678   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2679   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2680   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2681   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2682   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2683 




  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
  30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
  31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
  32 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
  33 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
  34 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
  35 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
  36 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
  37 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
  38 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
  39 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  40 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
  41 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
  42 #include "gc_implementation/shared/gcHeapSummary.hpp"
  43 #include "gc_implementation/shared/gcTimer.hpp"
  44 #include "gc_implementation/shared/gcTrace.hpp"
  45 #include "gc_implementation/shared/gcTraceTime.hpp"
  46 #include "gc_implementation/shared/isGCActiveMark.hpp"
  47 #include "gc_interface/gcCause.hpp"
  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/referencePolicy.hpp"
  50 #include "memory/referenceProcessor.hpp"
  51 #include "oops/methodData.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "oops/oop.pcgc.inline.hpp"
  54 #include "runtime/fprofiler.hpp"
  55 #include "runtime/safepoint.hpp"
  56 #include "runtime/vmThread.hpp"
  57 #include "services/management.hpp"
  58 #include "services/memoryService.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/events.hpp"
  61 #include "utilities/stack.inline.hpp"
  62 
  63 #include <math.h>
  64 
  65 // All sizes are in HeapWords.


 759   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 760   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 761   for (const size_t* p = beg; p < end; ++p) {
 762     assert(*p == 0, "not zero");
 763   }
 764 }
 765 
 766 void ParallelCompactData::verify_clear()
 767 {
 768   verify_clear(_region_vspace);
 769 }
 770 #endif  // #ifdef ASSERT
 771 
 772 #ifdef NOT_PRODUCT
 773 ParallelCompactData::RegionData* debug_region(size_t region_index) {
 774   ParallelCompactData& sd = PSParallelCompact::summary_data();
 775   return sd.region(region_index);
 776 }
 777 #endif
 778 
 779 STWGCTimer          PSParallelCompact::_gc_timer;
 780 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 781 elapsedTimer        PSParallelCompact::_accumulated_time;
 782 unsigned int        PSParallelCompact::_total_invocations = 0;
 783 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 784 jlong               PSParallelCompact::_time_of_last_gc = 0;
 785 CollectorCounters*  PSParallelCompact::_counters = NULL;
 786 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 787 ParallelCompactData PSParallelCompact::_summary_data;
 788 
 789 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 790 
 791 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 792 
 793 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 794 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 795 
 796 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 797 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 798 
 799 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
 800 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }


 934 
 935   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 936   const size_t end_region =
 937     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 938   _summary_data.clear_range(beg_region, end_region);
 939 
 940   // Clear the data used to 'split' regions.
 941   SplitInfo& split_info = _space_info[id].split_info();
 942   if (split_info.is_valid()) {
 943     split_info.clear();
 944   }
 945   DEBUG_ONLY(split_info.verify_clear();)
 946 }
 947 
 948 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 949 {
 950   // Update the from & to space pointers in space_info, since they are swapped
 951   // at each young gen gc.  Do the update unconditionally (even though a
 952   // promotion failure does not swap spaces) because an unknown number of minor
 953   // collections will have swapped the spaces an unknown number of times.
 954   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
 955   ParallelScavengeHeap* heap = gc_heap();
 956   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 957   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 958 
 959   pre_gc_values->fill(heap);
 960 
 961   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 962   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 963 
 964   // Increment the invocation count
 965   heap->increment_total_collections(true);
 966 
 967   // We need to track unique mark sweep invocations as well.
 968   _total_invocations++;
 969 
 970   heap->print_heap_before_gc();
 971   heap->trace_heap_before_gc(&_gc_tracer);
 972 
 973   // Fill in TLABs
 974   heap->accumulate_statistics_all_tlabs();
 975   heap->ensure_parsability(true);  // retire TLABs
 976 
 977   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 978     HandleMark hm;  // Discard invalid handles created during verification
 979     Universe::verify(" VerifyBeforeGC:");
 980   }
 981 
 982   // Verify object start arrays
 983   if (VerifyObjectStartArray &&
 984       VerifyBeforeGC) {
 985     heap->old_gen()->verify_object_start_array();
 986   }
 987 
 988   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 989   DEBUG_ONLY(summary_data().verify_clear();)
 990 
 991   // Have worker threads release resources the next time they run a task.
 992   gc_task_manager()->release_all_resources();
 993 }
 994 
 995 void PSParallelCompact::post_compact()
 996 {
 997   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
 998 
 999   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1000     // Clear the marking bitmap, summary data and split info.
1001     clear_data_covering_space(SpaceId(id));
1002     // Update top().  Must be done after clearing the bitmap and summary data.
1003     _space_info[id].publish_new_top();
1004   }
1005 
1006   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1007   MutableSpace* const from_space = _space_info[from_space_id].space();
1008   MutableSpace* const to_space   = _space_info[to_space_id].space();
1009 
1010   ParallelScavengeHeap* heap = gc_heap();
1011   bool eden_empty = eden_space->is_empty();
1012   if (!eden_empty) {
1013     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1014                                             heap->young_gen(), heap->old_gen());
1015   }
1016 
1017   // Update heap occupancy information which is used as input to the soft ref


1803     tty->print_cr("summarizing %d [%s] into %d [%s]:  "
1804                   "src=" PTR_FORMAT "-" PTR_FORMAT " "
1805                   SIZE_FORMAT "-" SIZE_FORMAT " "
1806                   "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1807                   SIZE_FORMAT "-" SIZE_FORMAT,
1808                   src_space_id, space_names[src_space_id],
1809                   dst_space_id, space_names[dst_space_id],
1810                   src_beg, src_end,
1811                   _summary_data.addr_to_region_idx(src_beg),
1812                   _summary_data.addr_to_region_idx(src_end),
1813                   dst_beg, dst_end,
1814                   _summary_data.addr_to_region_idx(dst_beg),
1815                   _summary_data.addr_to_region_idx(dst_end));
1816   }
1817 }
1818 #endif  // #ifndef PRODUCT
1819 
1820 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1821                                       bool maximum_compaction)
1822 {
1823   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
1824   // trace("2");
1825 
1826 #ifdef  ASSERT
1827   if (TraceParallelOldGCMarkingPhase) {
1828     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1829                   "add_obj_bytes=" SIZE_FORMAT,
1830                   add_obj_count, add_obj_size * HeapWordSize);
1831     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1832                   "mark_bitmap_bytes=" SIZE_FORMAT,
1833                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1834   }
1835 #endif  // #ifdef ASSERT
1836 
1837   // Quick summarization of each space into itself, to see how much is live.
1838   summarize_spaces_quick();
1839 
1840   if (TraceParallelOldGCSummaryPhase) {
1841     tty->print_cr("summary_phase:  after summarizing each space to self");
1842     Universe::print();
1843     NOT_PRODUCT(print_region_ranges());


1966 
1967   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1968                                       maximum_heap_compaction);
1969 }
1970 
1971 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1972   size_t addr_region_index = addr_to_region_idx(addr);
1973   return region_index == addr_region_index;
1974 }
1975 
1976 // This method contains no policy. You should probably
1977 // be calling invoke() instead.
1978 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1979   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1980   assert(ref_processor() != NULL, "Sanity");
1981 
1982   if (GC_locker::check_active_before_gc()) {
1983     return false;
1984   }
1985 
1986   ParallelScavengeHeap* heap = gc_heap();
1987 
1988   _gc_timer.register_gc_start(os::elapsed_counter());
1989   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1990 
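  // The timer/tracer pair opened just above is closed near the end of this
  // method (register_gc_end / report_gc_end); the GCTraceTime scopes used by
  // the phases below record their timings into the same _gc_timer.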
1991   TimeStamp marking_start;
1992   TimeStamp compaction_start;
1993   TimeStamp collection_exit;
1994 

1995   GCCause::Cause gc_cause = heap->gc_cause();
1996   PSYoungGen* young_gen = heap->young_gen();
1997   PSOldGen* old_gen = heap->old_gen();
1998   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1999 
2000   // The scope of casr should end after code that can change
2001   // CollectorPolicy::_should_clear_all_soft_refs.
2002   ClearedAllSoftRefs casr(maximum_heap_compaction,
2003                           heap->collector_policy());
2004 
2005   if (ZapUnusedHeapArea) {
2006     // Save information needed to minimize mangling
2007     heap->record_gen_tops_before_GC();
2008   }
2009 
2010   heap->pre_full_gc_dump(&_gc_timer);
2011 
2012   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
2013 
2014   // Make sure data structures are sane, make the heap parsable, and do other
2015   // miscellaneous bookkeeping.
2016   PreGCValues pre_gc_values;
2017   pre_compact(&pre_gc_values);
2018 
2019   // Get the compaction manager reserved for the VM thread.
2020   ParCompactionManager* const vmthread_cm =
2021     ParCompactionManager::manager_array(gc_task_manager()->workers());
2022 
2023   // Place after pre_compact() where the number of invocations is incremented.
2024   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
2025 
2026   {
2027     ResourceMark rm;
2028     HandleMark hm;
2029 
2030     // Set the number of GC threads to be used in this collection
2031     gc_task_manager()->set_active_gang();
2032     gc_task_manager()->task_idle_workers();
2033     heap->set_par_threads(gc_task_manager()->active_workers());
2034 
2035     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2036     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2037     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
2038     TraceCollectorStats tcs(counters());
 2039     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
2040 
2041     if (TraceGen1Time) accumulated_time()->start();
2042 
2043     // Let the size policy know we're starting
2044     size_policy->major_collection_begin();
2045 
2046     CodeCache::gc_prologue();
2047     Threads::gc_prologue();
2048 
2049     COMPILER2_PRESENT(DerivedPointerTable::clear());
2050 
2051     ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
2052     ref_processor()->setup_policy(maximum_heap_compaction);
2053 
2054     bool marked_for_unloading = false;
2055 
2056     marking_start.update();
2057     marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
2058 
2059     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
2060       && gc_cause == GCCause::_java_lang_system_gc;
2061     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2062 
2063     COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2064     COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2065 
2066     // adjust_roots() updates Universe::_intArrayKlassObj which is
2067     // needed by the compaction for filling holes in the dense prefix.
2068     adjust_roots();
2069 
2070     compaction_start.update();
2071     compact();
2072 
2073     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
2074     // done before resizing.
2075     post_compact();
2076 
2077     // Let the size policy know we're done


2190   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2191     HandleMark hm;  // Discard invalid handles created during verification
2192     Universe::verify(" VerifyAfterGC:");
2193   }
2194 
2195   // Re-verify object start arrays
2196   if (VerifyObjectStartArray &&
2197       VerifyAfterGC) {
2198     old_gen->verify_object_start_array();
2199   }
2200 
2201   if (ZapUnusedHeapArea) {
2202     old_gen->object_space()->check_mangled_unused_area_complete();
2203   }
2204 
2205   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2206 
2207   collection_exit.update();
2208 
2209   heap->print_heap_after_gc();
2210   heap->trace_heap_after_gc(&_gc_tracer);
2211 
2212   if (PrintGCTaskTimeStamps) {
2213     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2214                            INT64_FORMAT,
2215                            marking_start.ticks(), compaction_start.ticks(),
2216                            collection_exit.ticks());
2217     gc_task_manager()->print_task_time_stamps();
2218   }
2219 
2220   heap->post_full_gc_dump(&_gc_timer);
2221 
2222 #ifdef TRACESPINNING
2223   ParallelTaskTerminator::print_termination_counts();
2224 #endif
2225 
2226   _gc_timer.register_gc_end(os::elapsed_counter());
2227 
2228   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
2229   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
2230 
2231   return true;
2232 }
2233 
2234 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2235                                              PSYoungGen* young_gen,
2236                                              PSOldGen* old_gen) {
2237   MutableSpace* const eden_space = young_gen->eden_space();
2238   assert(!eden_space->is_empty(), "eden must be non-empty");
2239   assert(young_gen->virtual_space()->alignment() ==
2240          old_gen->virtual_space()->alignment(), "alignments do not match");
2241 
2242   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2243     return false;
2244   }
2245 
2246   // Both generations must be completely committed.
2247   if (young_gen->virtual_space()->uncommitted_size() != 0) {
2248     return false;
2249   }
2250   if (old_gen->virtual_space()->uncommitted_size() != 0) {


2309   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2310     start_array->allocate_block(p);
2311   }
2312 
2313   // Could update the promoted average here, but it is not typically updated at
2314   // full GCs and the value to use is unclear.  Something like
2315   //
2316   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2317 
2318   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2319   return true;
2320 }
2321 
2322 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2323   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2324     "shouldn't return NULL");
2325   return ParallelScavengeHeap::gc_task_manager();
2326 }
2327 
2328 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2329                                       bool maximum_heap_compaction,
2330                                       ParallelOldTracer *gc_tracer) {
2331   // Recursively traverse all live objects and mark them
2332   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
2333 
2334   ParallelScavengeHeap* heap = gc_heap();
2335   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2336   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2337   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2338   ParallelTaskTerminator terminator(active_gc_threads, qset);
2339 
2340   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2341   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2342 
2343   // Need new claim bits before marking starts.
2344   ClassLoaderDataGraph::clear_claimed_marks();
2345 
2346   {
2347     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
2348 
2349     ParallelScavengeHeap::ParStrongRootsScope psrs;
2350 
2351     GCTaskQueue* q = GCTaskQueue::create();
2352 
2353     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2354     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2355     // We scan the thread roots in parallel
2356     Threads::create_thread_roots_marking_tasks(q);
2357     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2358     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2359     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2360     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2361     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2362     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2363     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2364 
2365     if (active_gc_threads > 1) {
2366       for (uint j = 0; j < active_gc_threads; j++) {
2367         q->enqueue(new StealMarkingTask(&terminator));
2368       }
2369     }
2370 
2371     gc_task_manager()->execute_and_wait(q);
2372   }
2373 
2374   // Process reference objects found during marking
2375   {
2376     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
2377 
2378     ReferenceProcessorStats stats;
2379     if (ref_processor()->processing_is_mt()) {
2380       RefProcTaskExecutor task_executor;
2381       stats = ref_processor()->process_discovered_references(
2382         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2383         &task_executor, &_gc_timer);
2384     } else {
2385       stats = ref_processor()->process_discovered_references(
2386         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2387         &_gc_timer);
2388     }
2389 
2390     gc_tracer->report_gc_reference_stats(stats);
2391   }
2392 
2393   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
2394 
2395   // This is the point where the entire marking should have completed.
2396   assert(cm->marking_stacks_empty(), "Marking should have completed");
2397 
2398   // Follow system dictionary roots and unload classes.
2399   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2400 
2401   // Unload nmethods.
2402   CodeCache::do_unloading(is_alive_closure(), purged_class);
2403 
2404   // Prune dead klasses from subklass/sibling/implementor lists.
2405   Klass::clean_weak_klass_links(is_alive_closure());
2406 
2407   // Delete entries for dead interned strings.
2408   StringTable::unlink(is_alive_closure());
2409 
2410   // Clean up unreferenced symbols in symbol table.
2411   SymbolTable::unlink();
2412   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2413 }
2414 
2415 void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
2416   ClassLoaderData* cld = klass->class_loader_data();
2417   // The actual processing of the klass is done when we
2418   // traverse the list of Klasses in the class loader data.
2419   PSParallelCompact::follow_class_loader(cm, cld);
2420 }
2421 
2422 void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
2423   ClassLoaderData* cld = klass->class_loader_data();
2424   // The actual processing of the klass is done when we
2425   // traverse the list of Klasses in the class loader data.
2426   PSParallelCompact::adjust_class_loader(cm, cld);
2427 }
2428 
2429 void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
2430                                             ClassLoaderData* cld) {
2431   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2432   PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
2433 
2434   cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
2435 }
2436 
2437 void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
2438                                             ClassLoaderData* cld) {
2439   cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
2440                PSParallelCompact::adjust_klass_closure(),
2441                true);
2442 }
2443 
2444 // This should be moved to the shared markSweep code!
2445 class PSAlwaysTrueClosure: public BoolObjectClosure {
2446 public:
2447   bool do_object_b(oop p) { return true; }
2448 };
2449 static PSAlwaysTrueClosure always_true;
2450 
2451 void PSParallelCompact::adjust_roots() {
2452   // Adjust the pointers to reflect the new locations
2453   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
2454 
2455   // Need new claim bits when tracing through and adjusting pointers.
2456   ClassLoaderDataGraph::clear_claimed_marks();
2457 
2458   // General strong roots.
2459   Universe::oops_do(adjust_pointer_closure());
2460   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
2461   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2462   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2463   ObjectSynchronizer::oops_do(adjust_pointer_closure());
2464   FlatProfiler::oops_do(adjust_pointer_closure());
2465   Management::oops_do(adjust_pointer_closure());
2466   JvmtiExport::oops_do(adjust_pointer_closure());
2467   // SO_AllClasses
2468   SystemDictionary::oops_do(adjust_pointer_closure());
2469   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2470 
2471   // Now adjust pointers in remaining weak roots.  (All of which should
2472   // have been cleared if they pointed to non-surviving objects.)
2473   // Global (weak) JNI handles
2474   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
2475 
2476   CodeCache::oops_do(adjust_pointer_closure());
2477   StringTable::oops_do(adjust_pointer_closure());
2478   ref_processor()->weak_oops_do(adjust_pointer_closure());
2479   // Roots were visited so references into the young gen in roots
2480   // may have been scanned.  Process them also.
2481   // Should the reference processor have a span that excludes
2482   // young gen objects?
2483   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2484 }
2485 
2486 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2487                                                       uint parallel_gc_threads)
2488 {
2489   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
2490 
2491   // Find the threads that are active
2492   unsigned int which = 0;
2493 
2494   const uint task_count = MAX2(parallel_gc_threads, 1U);
2495   for (uint j = 0; j < task_count; j++) {
2496     q->enqueue(new DrainStacksCompactionTask(j));
2497     ParCompactionManager::verify_region_list_empty(j);
 2498     // Set the region stack variables to "no" region stack values
 2499     // so that they will be recognized as needing a region stack
2500     // in the stealing tasks if they do not get one by executing
2501     // a draining stack.
2502     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2503     cm->set_region_stack(NULL);
2504     cm->set_region_stack_index((uint)max_uintx);
2505   }
2506   ParCompactionManager::reset_recycled_stack_index();
2507 
2508   // Find all regions that are available (can be filled immediately) and
2509   // distribute them to the thread stacks.  The iteration is done in reverse


2543         // Assign regions to tasks in round-robin fashion.
2544         if (++which == task_count) {
2545           assert(which <= parallel_gc_threads,
2546             "Inconsistent number of workers");
2547           which = 0;
2548         }
2549       }
2550     }
2551   }
2552 
2553   if (TraceParallelOldGCCompactionPhase) {
2554     if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2555     gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2556   }
2557 }
2558 
2559 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2560 
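  // Apparently an over-partitioning factor: when the dense prefix is large
  // enough, its update work is split into roughly parallel_gc_threads *
  // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING tasks, so threads that finish
  // early can pick up extra chunks. (Most of the partitioning code is elided
  // in this excerpt.)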
2561 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2562                                                     uint parallel_gc_threads) {
2563   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
2564 
2565   ParallelCompactData& sd = PSParallelCompact::summary_data();
2566 
2567   // Iterate over all the spaces adding tasks for updating
2568   // regions in the dense prefix.  Assume that 1 gc thread
2569   // will work on opening the gaps and the remaining gc threads
2570   // will work on the dense prefix.
2571   unsigned int space_id;
2572   for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2573     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2574     const MutableSpace* const space = _space_info[space_id].space();
2575 
2576     if (dense_prefix_end == space->bottom()) {
2577       // There is no dense prefix for this space.
2578       continue;
2579     }
2580 
2581     // The dense prefix is before this region.
2582     size_t region_index_end_dense_prefix =
2583         sd.addr_to_region_idx(dense_prefix_end);


2625         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2626                                              region_index_start,
2627                                              region_index_end));
2628         region_index_start = region_index_end;
2629       }
2630     }
2631     // This gets any part of the dense prefix that did not
2632     // fit evenly.
2633     if (region_index_start < region_index_end_dense_prefix) {
2634       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2635                                            region_index_start,
2636                                            region_index_end_dense_prefix));
2637     }
2638   }
2639 }
2640 
2641 void PSParallelCompact::enqueue_region_stealing_tasks(
2642                                      GCTaskQueue* q,
2643                                      ParallelTaskTerminator* terminator_ptr,
2644                                      uint parallel_gc_threads) {
2645   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
2646 
 2647   // Once a thread has drained its stack, it should try to steal regions from
2648   // other threads.
2649   if (parallel_gc_threads > 1) {
2650     for (uint j = 0; j < parallel_gc_threads; j++) {
2651       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2652     }
2653   }
2654 }
2655 
2656 void PSParallelCompact::compact() {
2657   // trace("5");
2658   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
2659 
2660   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2661   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2662   PSOldGen* old_gen = heap->old_gen();
2663   old_gen->start_array()->reset();
2664   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2665   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2666   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2667   ParallelTaskTerminator terminator(active_gc_threads, qset);
2668 
2669   GCTaskQueue* q = GCTaskQueue::create();
2670   enqueue_region_draining_tasks(q, active_gc_threads);
2671   enqueue_dense_prefix_tasks(q, active_gc_threads);
2672   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2673 
2674   {
2675     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
2676 
2677     gc_task_manager()->execute_and_wait(q);
2678 
2679 #ifdef  ASSERT
2680     // Verify that all regions have been processed before the deferred updates.
2681     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2682       verify_complete(SpaceId(id));
2683     }
2684 #endif
2685   }
2686 
2687   {
2688     // Update the deferred objects, if any.  Any compaction manager can be used.
2689     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
2690     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2691     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2692       update_deferred_objects(cm, SpaceId(id));
2693     }
2694   }
2695 }
2696 
2697 #ifdef  ASSERT
2698 void PSParallelCompact::verify_complete(SpaceId space_id) {
 2699   // All Regions between space bottom() and new_top() should be marked as filled
2700   // and all Regions between new_top() and top() should be available (i.e.,
2701   // should have been emptied).
2702   ParallelCompactData& sd = summary_data();
2703   SpaceInfo si = _space_info[space_id];
2704   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2705   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2706   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2707   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2708   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2709