1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_genMarkSweep.cpp.incl"
  27 
  28 void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
  29   bool clear_all_softrefs) {
  30   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  31 
  32   GenCollectedHeap* gch = GenCollectedHeap::heap();
  33 #ifdef ASSERT
  34   if (gch->collector_policy()->should_clear_all_soft_refs()) {
  35     assert(clear_all_softrefs, "Policy should have been checked earlier");
  36   }
  37 #endif
  38 
  39   // hook up weak ref data so it can be used during Mark-Sweep
  40   assert(ref_processor() == NULL, "no stomping");
  41   assert(rp != NULL, "should be non-NULL");
  42   _ref_processor = rp;
  43   rp->setup_policy(clear_all_softrefs);
  44 
  45   TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  46 
  47   // When collecting the permanent generation methodOops may be moving,
  48   // so we either have to flush all bcp data or convert it into bci.
  49   CodeCache::gc_prologue();
  50   Threads::gc_prologue();
  51 
  52   // Increment the invocation count for the permanent generation, since it is
  53   // implicitly collected whenever we do a full mark sweep collection.
  54   gch->perm_gen()->stat_record()->invocations++;
  55 
  56   // Capture heap size before collection for printing.
  57   size_t gch_prev_used = gch->used();
  58 
  59   // Some of the card table updates below assume that the perm gen is
  60   // also being collected.
  61   assert(level == gch->n_gens() - 1,
  62          "All generations are being collected, ergo perm gen too.");
  63 
  64   // Capture used regions for each generation that will be
  65   // subject to collection, so that card table adjustments can
  66   // be made intelligently (see clear / invalidate further below).
  67   gch->save_used_regions(level, true /* perm */);
  68 
  69   allocate_stacks();
  70 
  71   mark_sweep_phase1(level, clear_all_softrefs);
  72 
  73   mark_sweep_phase2();
  74 
  75   // Don't add any more derived pointers during phase3
  76   COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  77   COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
  78 
  79   mark_sweep_phase3(level);
  80 
  81   VALIDATE_MARK_SWEEP_ONLY(
  82     if (ValidateMarkSweep) {
  83       guarantee(_root_refs_stack->length() == 0, "should be empty by now");
  84     }
  85   )
  86 
  87   mark_sweep_phase4();
  88 
  89   VALIDATE_MARK_SWEEP_ONLY(
  90     if (ValidateMarkSweep) {
  91       guarantee(_live_oops->length() == _live_oops_moved_to->length(),
  92                 "should be the same size");
  93     }
  94   )
  95 
  96   restore_marks();
  97 
  98   // Set saved marks for allocation profiler (and other things? -- dld)
  99   // (Should this be in general part?)
 100   gch->save_marks();
 101 
 102   deallocate_stacks();
 103 
 104   // If compaction completely evacuated all generations younger than this
 105   // one, then we can clear the card table.  Otherwise, we must invalidate
 106   // it (consider all cards dirty).  In the future, we might consider doing
 107   // compaction within generations only, and doing card-table sliding.
 108   bool all_empty = true;
 109   for (int i = 0; all_empty && i < level; i++) {
 110     Generation* g = gch->get_gen(i);
 111     all_empty = all_empty && gch->get_gen(i)->used() == 0;
 112   }
 113   GenRemSet* rs = gch->rem_set();
 114   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
 115   if (all_empty) {
 116     // We've evacuated all generations below us.
 117     Generation* g = gch->get_gen(level);
 118     rs->clear_into_younger(g, true /* perm */);
 119   } else {
 120     // Invalidate the cards corresponding to the currently used
 121     // region and clear those corresponding to the evacuated region
 122     // of all generations just collected (i.e. level and younger).
 123     rs->invalidate_or_clear(gch->get_gen(level),
 124                             true /* younger */,
 125                             true /* perm */);
 126   }
 127 
 128   Threads::gc_epilogue();
 129   CodeCache::gc_epilogue();
 130 
 131   if (PrintGC && !PrintGCDetails) {
 132     gch->print_heap_change(gch_prev_used);
 133   }
 134 
 135   // refs processing: clean slate
 136   _ref_processor = NULL;
 137 
 138   // Update heap occupancy information which is used as
 139   // input to soft ref clearing policy at the next gc.
 140   Universe::update_heap_info_at_gc();
 141 
 142   // Update time of last gc for all generations we collected
 143   // (which curently is all the generations in the heap).
 144   gch->update_time_of_last_gc(os::javaTimeMillis());
 145 }
 146 
 147 void GenMarkSweep::allocate_stacks() {
 148   GenCollectedHeap* gch = GenCollectedHeap::heap();
 149   // Scratch request on behalf of oldest generation; will do no
 150   // allocation.
 151   ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0);
 152 
 153   // $$$ To cut a corner, we'll only use the first scratch block, and then
 154   // revert to malloc.
 155   if (scratch != NULL) {
 156     _preserved_count_max =
 157       scratch->num_words * HeapWordSize / sizeof(PreservedMark);
 158   } else {
 159     _preserved_count_max = 0;
 160   }
 161 
 162   _preserved_marks = (PreservedMark*)scratch;
 163   _preserved_count = 0;
 164 
 165 #ifdef VALIDATE_MARK_SWEEP
 166   if (ValidateMarkSweep) {
 167     _root_refs_stack    = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
 168     _other_refs_stack   = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
 169     _adjusted_pointers  = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
 170     _live_oops          = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
 171     _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
 172     _live_oops_size     = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
 173   }
 174   if (RecordMarkSweepCompaction) {
 175     if (_cur_gc_live_oops == NULL) {
 176       _cur_gc_live_oops           = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
 177       _cur_gc_live_oops_moved_to  = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
 178       _cur_gc_live_oops_size      = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
 179       _last_gc_live_oops          = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
 180       _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
 181       _last_gc_live_oops_size     = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
 182     } else {
 183       _cur_gc_live_oops->clear();
 184       _cur_gc_live_oops_moved_to->clear();
 185       _cur_gc_live_oops_size->clear();
 186     }
 187   }
 188 #endif
 189 }
 190 
 191 
 192 void GenMarkSweep::deallocate_stacks() {
 193   if (!UseG1GC) {
 194     GenCollectedHeap* gch = GenCollectedHeap::heap();
 195     gch->release_scratch();
 196   }
 197 
 198   _preserved_mark_stack.clear(true);
 199   _preserved_oop_stack.clear(true);
 200   _marking_stack.clear();
 201   _objarray_stack.clear(true);
 202   _revisit_klass_stack.clear(true);
 203   _revisit_mdo_stack.clear(true);
 204 
 205 #ifdef VALIDATE_MARK_SWEEP
 206   if (ValidateMarkSweep) {
 207     delete _root_refs_stack;
 208     delete _other_refs_stack;
 209     delete _adjusted_pointers;
 210     delete _live_oops;
 211     delete _live_oops_size;
 212     delete _live_oops_moved_to;
 213     _live_oops_index = 0;
 214     _live_oops_index_at_perm = 0;
 215   }
 216 #endif
 217 }
 218 
// Mark-sweep phase 1: compute the transitive closure of live objects.
// Marks everything reachable from the strong roots, then processes
// discovered references, unloads unreachable classes and nmethods, and
// prunes weak klass/MDO links and the symbol/string tables.  The steps
// are order-dependent: each unlink/unload step relies on the mark state
// established by the steps before it.
void GenMarkSweep::mark_sweep_phase1(int level,
                                  bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  trace(" 1");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // activate StrongRootsScope
                                true,  // Collecting permanent generation.
                                SharedHeap::SO_SystemClasses,
                                &follow_root_closure,
                                true,   // walk code active on stacks
                                &follow_root_closure);

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      &is_alive, &keep_alive, &follow_stack_closure, NULL);
  }

  // Follow system dictionary roots and unload classes
  // (purged_class: true if any class was unloaded; presumably used below
  // so the code cache can unload dependent nmethods -- confirm in
  // CodeCache::do_unloading).
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Follow code cache roots
  CodeCache::do_unloading(&is_alive, &keep_alive, purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized MDO's and clear any unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&is_alive);
  StringTable::unlink(&is_alive);

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}
 273 
 274 
// Mark-sweep phase 2: compute the post-compaction address of every live
// object (stored via forwarding pointers) without moving anything yet.
void GenMarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  trace("2");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Forwarding addresses for the regular generations first...
  gch->prepare_for_compaction();

  // ...and perm gen last (see ordering comment above).
  VALIDATE_MARK_SWEEP_ONLY(_live_oops_index_at_perm = _live_oops_index);
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
 304 
 305 class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure {
 306 public:
 307   void do_generation(Generation* gen) {
 308     gen->adjust_pointers();
 309   }
 310 };
 311 
// Mark-sweep phase 3: rewrite every reference (strong roots, weak
// roots, and intra-heap pointers) to the forwarding address computed in
// phase 2.  Nothing moves yet; only pointers are updated.
void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  trace("3");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Needs to be done before the system dictionary is adjusted.
  pg->pre_adjust_pointers();

  // Because the two closures below are created statically, cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // activate StrongRootsScope
                                true,  // Collecting permanent generation.
                                SharedHeap::SO_AllClasses,
                                &adjust_root_pointer_closure,
                                false, // do not walk code
                                &adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
                                                   /*do_marking=*/ false);
  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
                              &adjust_code_pointer_closure,
                              &adjust_pointer_closure);

  // Fix up preserved marks, then sweep every generation (perm last)
  // updating the pointers inside live objects.
  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
  pg->adjust_pointers();
}
 355 
 356 class GenCompactClosure: public GenCollectedHeap::GenClosure {
 357 public:
 358   void do_generation(Generation* gen) {
 359     gen->compact();
 360   }
 361 };
 362 
// Mark-sweep phase 4: slide every live object to its forwarding address
// computed in phase 2.  All pointers were rewritten in phase 3, so the
// copies here must not be dereferenced through stale locations.
void GenMarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through it's klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  trace("4");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(true));

  // Perm gen first (see ordering comment above).
  pg->compact();

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Then the remaining generations.
  GenCompactClosure blk;
  gch->generation_iterate(&blk, true);

  VALIDATE_MARK_SWEEP_ONLY(compaction_complete());

  pg->post_compact(); // Shared spaces verification.
}