/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

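// Entry point for the G1 serial full collection. Runs the four classic
// mark-compact phases at a safepoint: mark live objects, compute new
// object addresses, adjust pointers, and move objects.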
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  HandleMark hm;  // Discard invalid handles created during gc

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Process roots and do the marking.
  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  // Prepare compaction.
  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  // Adjust all pointers.
  mark_sweep_phase3();

  // Do the actual compaction.
  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Now update the derived pointers.
  DerivedPointerTable::update_pointers();
#endif

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::set_ref_processor(NULL);
}

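// The timer and tracer for the full GC are owned by the surrounding
// G1FullGCScope; these accessors simply forward to the current scope.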
STWGCTimer* G1MarkSweep::gc_timer() {
  return G1FullGCScope::instance()->timer();
}

SerialOldTracer* G1MarkSweep::gc_tracer() {
  return G1FullGCScope::instance()->tracer();
}

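// Note: G1 does not preallocate a fixed-size preserved-marks buffer, so
// the buffer bookkeeping is reset to empty here; any marks that need
// preserving end up on GenMarkSweep's growable preserved-mark stacks.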
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    if (ClassUnloading) {
      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    } else {
      root_processor.process_all_roots_no_string_table(
                                          &GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    }
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());

    // Process reference objects found during marking
    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
    assert(rp == g1h->ref_processor_stw(), "Sanity");

    rp->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q());

    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&GenMarkSweep::is_alive,
                                          &GenMarkSweep::keep_alive,
                                          &GenMarkSweep::follow_stack_closure,
                                          NULL,
                                          &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive, gc_timer());

    g1h->complete_cleaning(&GenMarkSweep::is_alive, purged_class);
  } else {
    GCTraceTime(Debug, gc, phases) trace("Cleanup", gc_timer());
    g1h->partial_cleaning(&GenMarkSweep::is_alive, true, true, G1StringDedup::is_enabled());
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    GCTraceTime(Info, gc, verify) tv("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}


void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}

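// Region closure for phase 3: adjusts all interior pointers in a region
// to point at the new object locations. A humongous object is handled
// exactly once, at its "starts humongous" region; closed archive regions
// are skipped because their contents never move.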
class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_closed_archive()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

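// Region closure for phase 4: performs the actual compaction. Regular
// regions are compacted in place; humongous objects are never moved, so
// a live humongous object only has its mark word re-initialized, and
// pinned regions are skipped entirely.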
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
      }
      hr->reset_during_compaction();
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

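// Phase 2 helper: walks every region to plan the compaction, then fixes
// up the region sets to account for any humongous regions freed along
// the way.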
void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

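// Frees a dead humongous region and makes its space available for
// compaction. A dummy free list is used because the free lists are
// recreated from scratch at the end of the GC (see update_sets()).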
void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  hr->set_containing_set(NULL);
  _humongous_regions_removed++;

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
}

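// A live humongous object is never moved by the full GC: it is forwarded
// to itself so that later phases treat it as already being at its final
// location. Dead humongous regions are freed immediately; all other
// non-pinned regions are prepared for compaction.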
bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    oop obj = oop(hr->humongous_start_region()->bottom());
    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
      obj->forward_to(obj);
    }
    if (!obj->is_gc_marked()) {
      free_humongous_region(hr);
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}