
src/share/vm/gc/serial/genMarkSweep.cpp

rev 8615 : CMSParallelFullGC: Parallel version of CMS Full GC.

Old version (before rev 8615):

 136   }
 137 
 138   // refs processing: clean slate
 139   _ref_processor = NULL;
 140 
 141   // Update heap occupancy information which is used as
 142   // input to soft ref clearing policy at the next gc.
 143   Universe::update_heap_info_at_gc();
 144 
 145   // Update time of last gc for all generations we collected
 146   // (which currently is all the generations in the heap).
 147   // We need to use a monotonically non-decreasing time in ms,
 148   // or we will see time-warp warnings; os::javaTimeMillis()
 149   // does not guarantee monotonicity.
 150   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 151   gch->update_time_of_last_gc(now);
 152 
 153   gch->trace_heap_after_gc(_gc_tracer);
 154 }
 155 
 156 void GenMarkSweep::allocate_stacks() {
 157   GenCollectedHeap* gch = GenCollectedHeap::heap();
 158   // Scratch request on behalf of old generation; will do no allocation.
 159   ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0);
 160 
 161   // $$$ To cut a corner, we'll only use the first scratch block, and then
 162   // revert to malloc.
 163   if (scratch != NULL) {
 164     _preserved_count_max =
 165       scratch->num_words * HeapWordSize / sizeof(PreservedMark);
 166   } else {
 167     _preserved_count_max = 0;
 168   }
 169 
 170   _preserved_marks = (PreservedMark*)scratch;
 171   _preserved_count = 0;
 172 }
 173 
 174 
 175 void GenMarkSweep::deallocate_stacks() {
 176   if (!UseG1GC) {
 177     GenCollectedHeap* gch = GenCollectedHeap::heap();
 178     gch->release_scratch();
 179   }
 180 
 181   _preserved_mark_stack.clear(true);
 182   _preserved_oop_stack.clear(true);
 183   _marking_stack.clear();
 184   _objarray_stack.clear(true);
 185 }
 186 
 187 void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 188   // Recursively traverse all live objects and mark them
 189   GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 190 
 191   GenCollectedHeap* gch = GenCollectedHeap::heap();
 192 
 193   // Because follow_root_closure is created statically, we cannot
 194   // use OopsInGenClosure constructor which takes a generation,
 195   // as the Universe has not been created when the static constructors
 196   // are run.
 197   follow_root_closure.set_orig_generation(gch->old_gen());
 198 
 199   // Need new claim bits before marking starts.
 200   ClassLoaderDataGraph::clear_claimed_marks();
 201 
 202   {
 203     StrongRootsScope srs(1);
 204 
 205     gch->gen_process_roots(&srs,
 206                            GenCollectedHeap::OldGen,
 207                            false, // Younger gens are not roots.
 208                            GenCollectedHeap::SO_None,
 209                            ClassUnloading,
 210                            &follow_root_closure,
 211                            &follow_root_closure,
 212                            &follow_cld_closure);
 213   }
 214 
 215   // Process reference objects found during marking
 216   {
 217     ref_processor()->setup_policy(clear_all_softrefs);
 218     const ReferenceProcessorStats& stats =
 219       ref_processor()->process_discovered_references(
 220         &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
 221     gc_tracer()->report_gc_reference_stats(stats);
 222   }
 223 
 224   // This is the point where the entire marking should have completed.
 225   assert(_marking_stack.is_empty(), "Marking should have completed");
 226 
 227   // Unload classes and purge the SystemDictionary.
 228   bool purged_class = SystemDictionary::do_unloading(&is_alive);
 229 
 230   // Unload nmethods.
 231   CodeCache::do_unloading(&is_alive, purged_class);
 232 
 233   // Prune dead klasses from subklass/sibling/implementor lists.
 234   Klass::clean_weak_klass_links(&is_alive);
 235 
 236   // Delete entries for dead interned strings.
 237   StringTable::unlink(&is_alive);
 238 
 239   // Clean up unreferenced symbols in symbol table.
 240   SymbolTable::unlink();
 241 
 242   gc_tracer()->report_object_count_after_gc(&is_alive);
 243 }
 244 
 245 
 246 void GenMarkSweep::mark_sweep_phase2() {
 247   // Now all live objects are marked, compute the new object addresses.
 248 
 249   // It is imperative that we traverse perm_gen LAST. If dead space is
 250   // allowed, a range of dead objects may get overwritten by a dead int
 251   // array. If perm_gen is not traversed last, a Klass* may get
 252   // overwritten. This is fine since it is dead, but if the class has dead
 253   // instances we have to skip them, and in order to find their size we
 254   // need the Klass*!
 255   //
 256   // It is not required that we traverse spaces in the same order in
 257   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
 258   // tracking expects us to do so. See comment under phase4.
 259 
 260   GenCollectedHeap* gch = GenCollectedHeap::heap();
 261 
 262   GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 263 
 264   gch->prepare_for_compaction();
 265 }
 266 
 267 class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure {
 268 public:
 269   void do_generation(Generation* gen) {
 270     gen->adjust_pointers();
 271   }
 272 };
 273 
 274 void GenMarkSweep::mark_sweep_phase3() {
 275   GenCollectedHeap* gch = GenCollectedHeap::heap();
 276 
 277   // Adjust the pointers to reflect the new locations
 278   GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 279 
 280   // Need new claim bits for the pointer adjustment tracing.
 281   ClassLoaderDataGraph::clear_claimed_marks();
 282 
 283   // Because the closure below is created statically, we cannot
 284   // use OopsInGenClosure constructor which takes a generation,
 285   // as the Universe has not been created when the static constructors
 286   // are run.
 287   adjust_pointer_closure.set_orig_generation(gch->old_gen());
 288 
 289   {
 290     StrongRootsScope srs(1);
 291 
 292     gch->gen_process_roots(&srs,
 293                            GenCollectedHeap::OldGen,
 294                            false, // Younger gens are not roots.
 295                            GenCollectedHeap::SO_AllCodeCache,
 296                            GenCollectedHeap::StrongAndWeakRoots,
 297                            &adjust_pointer_closure,
 298                            &adjust_pointer_closure,
 299                            &adjust_cld_closure);
 300   }
 301 
 302   gch->gen_process_weak_roots(&adjust_pointer_closure);
 303 
 304   adjust_marks();
 305   GenAdjustPointersClosure blk;
 306   gch->generation_iterate(&blk, true);
 307 }
 308 
 309 class GenCompactClosure: public GenCollectedHeap::GenClosure {
 310 public:
 311   void do_generation(Generation* gen) {
 312     gen->compact();
 313   }
 314 };
 315 
 316 void GenMarkSweep::mark_sweep_phase4() {
 317   // All pointers are now adjusted, move objects accordingly
 318 
 319   // It is imperative that we traverse perm_gen first in phase4. All
 320   // classes must be allocated earlier than their instances, and traversing
 321   // perm_gen first makes sure that all Klass*s have moved to their new
 322   // location before any instance does a dispatch through its klass!
 323 
 324   // The ValidateMarkSweep live oops tracking expects us to traverse spaces
 325   // in the same order in phase2, phase3 and phase4. We don't quite do that
 326   // here (perm_gen first rather than last), so we tell the validate code
 327   // to use a higher index (saved from phase2) when verifying perm_gen.
 328   GenCollectedHeap* gch = GenCollectedHeap::heap();
 329 
 330   GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 331 
 332   GenCompactClosure blk;
 333   gch->generation_iterate(&blk, true);
 334 }

New version (rev 8615):

 136   }
 137 
 138   // refs processing: clean slate
 139   _ref_processor = NULL;
 140 
 141   // Update heap occupancy information which is used as
 142   // input to soft ref clearing policy at the next gc.
 143   Universe::update_heap_info_at_gc();
 144 
 145   // Update time of last gc for all generations we collected
 146   // (which currently is all the generations in the heap).
 147   // We need to use a monotonically non-decreasing time in ms,
 148   // or we will see time-warp warnings; os::javaTimeMillis()
 149   // does not guarantee monotonicity.
 150   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 151   gch->update_time_of_last_gc(now);
 152 
 153   gch->trace_heap_after_gc(_gc_tracer);
 154 }
 155 
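      // The typedefs below set up the work-stealing machinery for the parallel
      // mark-sweep (PMS): an overflow task queue per GC worker for oops and for
      // object-array chunks, grouped into queue sets that workers can steal from.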
 156 typedef OverflowTaskQueue<oop, mtGC>                      ObjTaskQueue;
 157 typedef GenericTaskQueueSet<ObjTaskQueue, mtGC>           ObjTaskQueueSet;
 158 typedef OverflowTaskQueue<ObjArrayTask, mtGC>             ObjArrayTaskQueue;
 159 typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
 160 
 161 ObjTaskQueueSet*      GenMarkSweep::_pms_task_queues = NULL;
 162 ObjArrayTaskQueueSet* GenMarkSweep::_pms_objarray_task_queues = NULL;
 163 ObjTaskQueue*         GenMarkSweep::_pms_vm_thread_task_queue = NULL;
 164 ObjArrayTaskQueue*    GenMarkSweep::_pms_vm_thread_objarray_task_queue = NULL;
 165 bool                  GenMarkSweep::_pms_task_queues_initialized = false;
 166 
 167 // Initialize data structures for PMS.
 168 void GenMarkSweep::initialize_pms_task_queues() {
 169   GenCollectedHeap* gch = GenCollectedHeap::heap();
 170   WorkGang* work_gang = gch->workers();
 171   int n_workers = work_gang->total_workers();
 172   _pms_task_queues = new ObjTaskQueueSet(n_workers);
 173   _pms_objarray_task_queues = new ObjArrayTaskQueueSet(n_workers);
 174 
 175   for (int i = 0; i < n_workers; i++) {
 176     ObjTaskQueue* q = new ObjTaskQueue();
 177     _pms_task_queues->register_queue(i, q);
 178     _pms_task_queues->queue(i)->initialize();
 179 
 180     ObjArrayTaskQueue* oaq = new ObjArrayTaskQueue();
 181     _pms_objarray_task_queues->register_queue(i, oaq);
 182     _pms_objarray_task_queues->queue(i)->initialize();
 183   }
 184 
 185   _pms_vm_thread_task_queue = new ObjTaskQueue();
 186   _pms_vm_thread_task_queue->initialize();
 187   _pms_vm_thread_objarray_task_queue = new ObjArrayTaskQueue();
 188   _pms_vm_thread_objarray_task_queue->initialize();
 189 }
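The queue sets built above are what enable work stealing during parallel marking: each worker drains its own queue and, once empty, tries to steal from a sibling before terminating. The standalone sketch below illustrates that drain-then-steal shape with toy types; it does not use the real OverflowTaskQueue/GenericTaskQueueSet API.

#include <cstdio>
#include <deque>
#include <vector>

// Toy queue standing in for OverflowTaskQueue: the owner pops from the back,
// thieves steal from the front. (The real HotSpot queues are lock-free; a
// plain deque keeps this sketch short and single-threaded.)
struct ToyTaskQueue {
  std::deque<int> tasks;
  bool pop_local(int& t) {
    if (tasks.empty()) return false;
    t = tasks.back(); tasks.pop_back(); return true;
  }
  bool steal(int& t) {
    if (tasks.empty()) return false;
    t = tasks.front(); tasks.pop_front(); return true;
  }
};

// Drain loop in the shape of a PMS marking worker: exhaust the local queue,
// then try to steal from peers; terminate only when nothing is left anywhere.
void drain_with_stealing(std::vector<ToyTaskQueue>& qs, int my_id) {
  int task;
  for (;;) {
    while (qs[my_id].pop_local(task)) {
      std::printf("worker %d ran task %d\n", my_id, task);
      // processing a task may push follow-up tasks onto qs[my_id]
    }
    bool stole = false;
    for (int v = 0; v < (int)qs.size() && !stole; ++v) {
      if (v != my_id && qs[v].steal(task)) {
        qs[my_id].tasks.push_back(task);
        stole = true;
      }
    }
    if (!stole) return;
  }
}

int main() {
  std::vector<ToyTaskQueue> qs(2);
  qs[0].tasks = {1, 2};
  qs[1].tasks = {3};
  drain_with_stealing(qs, 0); // runs 2, 1, then steals and runs 3
  return 0;
}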
 190 
 191 void GenMarkSweep::allocate_stacks() {
 192   GenCollectedHeap* gch = GenCollectedHeap::heap();
 193   // Scratch request on behalf of old generation; will do no allocation.
 194   ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0);
 195 
 196   // $$$ To cut a corner, we'll only use the first scratch block, and then
 197   // revert to malloc.
 198   if (scratch != NULL) {
 199     _preserved_count_max =
 200       scratch->num_words * HeapWordSize / sizeof(PreservedMark);
 201   } else {
 202     _preserved_count_max = 0;
 203   }
 204 
 205   _preserved_marks = (PreservedMark*)scratch;
 206   _preserved_count = 0;
 207 
 208   if (CMSParallelFullGC) {
 209     if (!_pms_task_queues_initialized) {
 210       _pms_task_queues_initialized = true;
 211       initialize_pms_task_queues();
 212     }
 213 
 214     // Split the scratch memory evenly among the VM thread and the
 215     // worker threads.
 216     WorkGang* work_gang = gch->workers();
 217     int n_workers = work_gang->total_workers();
 218     PreservedMark* preserved_marks_top = _preserved_marks;
 219     size_t preserved_count_max_per_thread = _preserved_count_max / (1 + n_workers);
 220 
 221     NamedThread* vm_thread = Thread::current()->as_Named_thread();
 222     assert(vm_thread->is_VM_thread(), "Must be run by the VM thread");
 223 
 224     vm_thread->_pms_task_queue = _pms_vm_thread_task_queue;
 225     vm_thread->_pms_objarray_task_queue = _pms_vm_thread_objarray_task_queue;
 226     // Assign the statically allocated data structures to the VM
 227     // thread and avoid allocating a new set for the VM thread.
 228     vm_thread->_pms_preserved_mark_stack = &_preserved_mark_stack;
 229     vm_thread->_pms_preserved_oop_stack = &_preserved_oop_stack;
 230     vm_thread->_pms_preserved_count = _preserved_count;
 231     vm_thread->_pms_preserved_count_max = preserved_count_max_per_thread;
 232     vm_thread->_pms_preserved_marks = preserved_marks_top;
 233 
 234     preserved_marks_top += preserved_count_max_per_thread;
 235 
 236     // Allocate the per-thread marking_stack and objarray_stack here.
 237     for (int i = 0; i < n_workers; i++) {
 238       GangWorker* worker = work_gang->gang_worker(i);
 239       // typedef to work around the NEW_C_HEAP_OBJ macro, which cannot deal with ','
 240       typedef Stack<markOop, mtGC> GCMarkOopStack;
 241       typedef Stack<oop, mtGC> GCOopStack;
 242       // A ResourceStack might be a good choice here, but since there's no precedent of its
 243       // use anywhere else in HotSpot, it may not be reliable. Instead, allocate a Stack
 244       // with NEW_C_HEAP_OBJ, and call the constructor explicitly.
 245       worker->_pms_preserved_mark_stack = NEW_C_HEAP_OBJ(GCMarkOopStack, mtGC);
 246       new (worker->_pms_preserved_mark_stack) Stack<markOop, mtGC>();
 247       worker->_pms_preserved_oop_stack = NEW_C_HEAP_OBJ(GCOopStack, mtGC);
 248       new (worker->_pms_preserved_oop_stack) Stack<oop, mtGC>();
 249       worker->_pms_preserved_count = 0;
 250       worker->_pms_preserved_count_max = preserved_count_max_per_thread;
 251       worker->_pms_preserved_marks = preserved_marks_top;
 252       preserved_marks_top += preserved_count_max_per_thread;
 253     }
 254     // Note that _preserved_marks and _preserved_count_max aren't directly
 255     // used by the marking code if CMSParallelFullGC is enabled.
 256     assert(preserved_marks_top <= _preserved_marks + _preserved_count_max,
 257            "buffer overrun");
 258 
 259     assert(_pms_mark_bit_map != NULL, "the mark bit map must be initialized at this point.");
 260     if (ShareCMSMarkBitMapWithParallelFullGC) {
 261       // Clear it before the GC because it's shared and can be dirty
 262       // here.
 263       _pms_mark_bit_map->clear();
 264     } else {
 265       // If the mark bit map isn't shared, clear it at the end of GC.
 266       assert(_pms_mark_bit_map->isAllClear(),
 267              "Must have been cleared at the last invocation or at initialization.");
 268     }
 269     _pms_mark_counter = 0;
 270 
 271     assert(_pms_region_array_set == NULL, "Must be NULL");
 272     // Create region arrays before marking.
 273     // We are in a ResourceMark in CMSCollector::do_collection().
 274     _pms_region_array_set = new PMSRegionArraySet();
 275   }
 276 }
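allocate_stacks() above slices one scratch buffer into 1 + n_workers equal pieces, one for the VM thread and one per worker, and the final assert checks the slice cursors never run past the buffer. A minimal sketch of that partitioning arithmetic, with hypothetical names:

#include <cassert>
#include <cstddef>

struct Slice { char* base; std::size_t capacity; };

// Split a buffer of count_max fixed-size records evenly among the VM thread
// and n_workers workers; any remainder from the division goes unused.
void split_scratch(char* buf, std::size_t rec_size, std::size_t count_max,
                   int n_workers, Slice* out /* 1 + n_workers entries */) {
  std::size_t per_thread = count_max / (1 + n_workers);
  char* top = buf;
  for (int i = 0; i < 1 + n_workers; i++) {
    out[i].base = top;
    out[i].capacity = per_thread;
    top += per_thread * rec_size;
  }
  assert(top <= buf + count_max * rec_size && "buffer overrun");
}

int main() {
  enum { REC = 16, MAX = 103, WORKERS = 4 };
  static char buf[MAX * REC];
  Slice slices[1 + WORKERS];
  split_scratch(buf, REC, MAX, WORKERS, slices); // 20 records each, 3 unused
  return 0;
}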
 277 
 278 
 279 void GenMarkSweep::deallocate_stacks() {
 280   if (!UseG1GC) {
 281     GenCollectedHeap* gch = GenCollectedHeap::heap();
 282     gch->release_scratch();
 283   }
 284 
 285   _preserved_mark_stack.clear(true);
 286   _preserved_oop_stack.clear(true);
 287   _marking_stack.clear();
 288   _objarray_stack.clear(true);
 289 
 290   if (CMSParallelFullGC) {
 291     assert_marking_stack_empty();
 292 
 293     NamedThread* vm_thread = Thread::current()->as_Named_thread();
 294     assert(vm_thread->is_VM_thread(), "Must be run by the main CMS thread");
 295     vm_thread->reset_pms_data();
 296 
 297     // Clear the per-thread marking_stack and objarray_stack here.
 298     GenCollectedHeap* gch = GenCollectedHeap::heap();
 299     WorkGang* work_gang = gch->workers();
 300     int n_workers = work_gang->total_workers();
 301 
 302     for (int i = 0; i < n_workers; i++) {
 303       GangWorker* worker = work_gang->gang_worker(i);
 304       // typedef to work around the FREE_C_HEAP_ARRAY macro, which cannot
 305       // deal with ','
 306       typedef Stack<markOop, mtGC> GCMarkOopStack;
 307       typedef Stack<oop, mtGC> GCOopStack;
 308       // Call clear(), which does the Stack destructor's work,
 309       // since FREE_C_HEAP_ARRAY doesn't run destructors.
 310       ((Stack<markOop, mtGC>*)worker->_pms_preserved_mark_stack)->clear(true);
 311       ((Stack<oop, mtGC>*)worker->_pms_preserved_oop_stack)->clear(true);
 312       // Free the allocated memory
 313       FREE_C_HEAP_ARRAY(GCMarkOopStack, worker->_pms_preserved_mark_stack);
 314       FREE_C_HEAP_ARRAY(GCOopStack, worker->_pms_preserved_oop_stack);
 315       worker->_pms_preserved_mark_stack = NULL;
 316       worker->_pms_preserved_oop_stack = NULL;
 317 
 318       worker->reset_pms_data();
 319     }
 320 
 321     if (!ShareCMSMarkBitMapWithParallelFullGC) {
 322       _pms_mark_bit_map->clear();
 323     }
 324     _pms_region_array_set->cleanup();
 325     _pms_region_array_set = NULL;
 326     _pms_mark_counter = 0;
 327   }
 328 }
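The per-worker stacks above follow a fully manual lifecycle: raw C-heap allocation, placement new to run the constructor, and on teardown an explicit clear() plus free, because FREE_C_HEAP_ARRAY runs no destructors. A self-contained sketch of the same pattern in plain C++, with malloc/free standing in for the HotSpot macros:

#include <cstdlib>
#include <new>
#include <vector>

struct MarkStack {
  std::vector<long> elems;
  // Like Stack::clear(true): drop the elements and release their storage.
  void clear_and_release() { std::vector<long>().swap(elems); }
};

int main() {
  // NEW_C_HEAP_OBJ analogue: raw allocation, then placement new to run the
  // constructor explicitly, as the worker setup loop above does.
  void* raw = std::malloc(sizeof(MarkStack));
  MarkStack* s = new (raw) MarkStack();
  s->elems.push_back(42);

  // Teardown analogue: release storage explicitly, run the destructor by
  // hand, then free the raw memory; free() alone would do neither.
  s->clear_and_release();
  s->~MarkStack();
  std::free(raw);
  return 0;
}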
 329 
 330 void GenMarkSweep::assert_marking_stack_empty() {
 331 #ifdef ASSERT
 332   if (!CMSParallelFullGC) {
 333     assert(_marking_stack.is_empty(), "just drained");
 334     assert(_objarray_stack.is_empty(), "just drained");
 335   } else {
 336     NamedThread* thr = Thread::current()->as_Named_thread();
 337 
 338     assert(thr->is_VM_thread(), "Must be run by the main CMS thread");
 339     assert(((ObjTaskQueue*)thr->_pms_task_queue)->is_empty(), "just drained");
 340     assert(((ObjArrayTaskQueue*)thr->_pms_objarray_task_queue)->is_empty(), "just drained");
 341 
 342     // Check that all the per-thread marking stacks are empty here.
 343     GenCollectedHeap* gch = GenCollectedHeap::heap();
 344     WorkGang* work_gang = gch->workers();
 345     int n_workers = work_gang->total_workers();
 346 
 347     for (int i = 0; i < n_workers; i++) {
 348       GangWorker* worker = work_gang->gang_worker(i);
 349       assert(((ObjTaskQueue*)worker->_pms_task_queue)->is_empty(), "just drained");
 350       assert(((ObjArrayTaskQueue*)worker->_pms_objarray_task_queue)->is_empty(), "just drained");
 351     }
 352   }
 353 #endif // ASSERT
 354 }
 355 
 356 void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 357   // Recursively traverse all live objects and mark them
 358   GCTraceTime tm("phase 1",
 359                  PrintGC && (Verbose || LogCMSParallelFullGC),
 360                  true, _gc_timer, _gc_tracer->gc_id());
 361 
 362   GenCollectedHeap* gch = GenCollectedHeap::heap();
 363 
 364   // Because follow_root_closure is created statically, we cannot
 365   // use OopsInGenClosure constructor which takes a generation,
 366   // as the Universe has not been created when the static constructors
 367   // are run.
 368   follow_root_closure.set_orig_generation(gch->old_gen());
 369 
 370   // Need new claim bits before marking starts.
 371   ClassLoaderDataGraph::clear_claimed_marks();
 372 
 373   {
 374     GCTraceTime tm1("marking", PrintGC && (Verbose || LogCMSParallelFullGC),
 375                     true, NULL, _gc_tracer->gc_id());
 376     if (!CMSParallelFullGC) {
 377       StrongRootsScope srs(1);
 378 
 379       gch->gen_process_roots(&srs,
 380                              GenCollectedHeap::OldGen,
 381                              false, // Younger gens are not roots.
 382                              GenCollectedHeap::SO_None,
 383                              ClassUnloading,
 384                              &follow_root_closure,
 385                              &follow_root_closure,
 386                              &follow_cld_closure);
 387     } else {
 388       GenCollectedHeap* gch = GenCollectedHeap::heap();
 389       WorkGang* workers = gch->workers();
 390       assert(workers != NULL, "Need parallel worker threads.");
 391       int n_workers = workers->active_workers();
 392 
 393       StrongRootsScope srs(n_workers);
 394       PMSMarkTask tsk(&srs, workers, _pms_task_queues, _pms_objarray_task_queues);
 395       if (n_workers > 1) {
 396         // Make sure refs discovery MT-safe
 397         assert(ref_processor()->discovery_is_mt(),
 398                "Ref discovery must already be set to MT-safe");
 399         workers->run_task(&tsk);
 400       } else {
 401         tsk.work(0);
 402       }
 403     }
 404   }
 405 
 406   assert_marking_stack_empty();
 407 
 408   // Process reference objects found during marking
 409   {
 410     GCTraceTime tm2("ref processing", PrintGC && (Verbose || LogCMSParallelFullGC),
 411                     true, NULL, _gc_tracer->gc_id());
 412     ref_processor()->setup_policy(clear_all_softrefs);
 413 
 414     if (ref_processor()->processing_is_mt()) {
 415       assert(CMSParallelFullGC, "CMSParallelFullGC must be true");
 416       PMSRefProcTaskExecutor task_executor(_pms_task_queues, _pms_objarray_task_queues);
 417       const ReferenceProcessorStats& stats =
 418         ref_processor()->process_discovered_references(
 419           &is_alive, &keep_alive, &follow_stack_closure, &task_executor, _gc_timer, _gc_tracer->gc_id());
 420       gc_tracer()->report_gc_reference_stats(stats);
 421     } else {
 422       assert(!CMSParallelFullGC, "CMSParallelFullGC must be false");
 423       const ReferenceProcessorStats& stats =
 424         ref_processor()->process_discovered_references(
 425           &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
 426       gc_tracer()->report_gc_reference_stats(stats);
 427     }
 428   }
 429   GCTraceTime tm3("class unloading", PrintGC && (Verbose || LogCMSParallelFullGC),
 430                   true, NULL, _gc_tracer->gc_id());
 431 
 432   // This is the point where the entire marking should have completed.
 433   assert_marking_stack_empty();
 434 
 435   // Unload classes and purge the SystemDictionary.
 436   bool purged_class = SystemDictionary::do_unloading(&is_alive);
 437 
 438   // Unload nmethods.
 439   CodeCache::do_unloading(&is_alive, purged_class);
 440   assert_marking_stack_empty();
 441 
 442   // Prune dead klasses from subklass/sibling/implementor lists.
 443   Klass::clean_weak_klass_links(&is_alive);
 444 
 445   // Delete entries for dead interned strings.
 446   StringTable::unlink(&is_alive);
 447 
 448   // Clean up unreferenced symbols in symbol table.
 449   SymbolTable::unlink();
 450 
 451 #ifdef ASSERT
 452   if (CMSParallelFullGC) {
 453     // This is expensive!  Verify that the region live sizes computed
 454     // during marking match what the mark bit map says.
 455     MarkSweep::pms_region_array_set()->verify_live_size();
 456   }
 457 #endif
 458 
 459   gc_tracer()->report_object_count_after_gc(&is_alive);
 460 }
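When ref_processor()->processing_is_mt() holds, each reference-processing step is handed to a task executor that fans the work out across the gang, which is the role PMSRefProcTaskExecutor plays above. Below is a toy executor in the same shape; the Task/Executor types here are hypothetical stand-ins, not the real HotSpot interfaces:

#include <cstdio>

// A unit of reference-processing work that several workers can run, each on
// its own slice of the discovered-references lists.
struct RefProcTask {
  virtual void work(int worker_id) = 0;
  virtual ~RefProcTask() {}
};

// The executor decides how to spread a task over the workers; here a simple
// sequential loop stands in for workers->run_task(&tsk).
struct RefProcTaskExecutor {
  int n_workers;
  explicit RefProcTaskExecutor(int n) : n_workers(n) {}
  void execute(RefProcTask& task) {
    for (int i = 0; i < n_workers; i++) task.work(i);
  }
};

struct ProcessSliceTask : RefProcTask {
  void work(int worker_id) {
    std::printf("worker %d processed its reference slice\n", worker_id);
  }
};

int main() {
  RefProcTaskExecutor exec(4);
  ProcessSliceTask t;
  exec.execute(t);
  return 0;
}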
 461 
 462 
 463 void GenMarkSweep::mark_sweep_phase2() {
 464   // Now all live objects are marked, compute the new object addresses.
 465 
 466   // It is imperative that we traverse perm_gen LAST. If dead space is
 467   // allowed, a range of dead objects may get overwritten by a dead int
 468   // array. If perm_gen is not traversed last, a Klass* may get
 469   // overwritten. This is fine since it is dead, but if the class has dead
 470   // instances we have to skip them, and in order to find their size we
 471   // need the Klass*!
 472   //
 473   // It is not required that we traverse spaces in the same order in
 474   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
 475   // tracking expects us to do so. See comment under phase4.
 476 
 477   GenCollectedHeap* gch = GenCollectedHeap::heap();
 478 
 479   GCTraceTime tm("phase 2", PrintGC && (Verbose || LogCMSParallelFullGC),
 480                  true, _gc_timer, _gc_tracer->gc_id());
 481 
 482   gch->prepare_for_compaction();
 483 }
 484 
 485 class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure {
 486 public:
 487   void do_generation(Generation* gen) {
 488     GCTraceTime tm("per-gen-adjust", PrintGC && (Verbose || LogCMSParallelFullGC),
 489                    true, NULL, GCId::peek());
 490     if (LogCMSParallelFullGC) {
 491       gclog_or_tty->print_cr("%s", gen->name());
 492     }
 493     gen->adjust_pointers();
 494   }
 495 };
 496 
 497 void GenMarkSweep::mark_sweep_phase3() {
 498   GenCollectedHeap* gch = GenCollectedHeap::heap();
 499 
 500   // Adjust the pointers to reflect the new locations
 501   GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 502 
 503   // Need new claim bits for the pointer adjustment tracing.
 504   ClassLoaderDataGraph::clear_claimed_marks();
 505 
 506   // Because the closure below is created statically, we cannot
 507   // use OopsInGenClosure constructor which takes a generation,
 508   // as the Universe has not been created when the static constructors
 509   // are run.
 510   adjust_pointer_closure.set_orig_generation(gch->old_gen());
 511 
 512   {
 513     GCTraceTime tm("adjust-strong-roots",
 514                    (PrintGC && Verbose) || LogCMSParallelFullGC,
 515                    true, NULL, _gc_tracer->gc_id());
 516     if (!CMSParallelFullGC) {
 517       StrongRootsScope srs(1);
 518       gch->gen_process_roots(&srs,
 519                              GenCollectedHeap::OldGen,
 520                              false, // Younger gens are not roots.
 521                              GenCollectedHeap::SO_AllCodeCache,
 522                              GenCollectedHeap::StrongAndWeakRoots,
 523                              &adjust_pointer_closure,
 524                              &adjust_pointer_closure,
 525                              &adjust_cld_closure);
 526     } else {
 527       WorkGang* workers = gch->workers();
 528       assert(workers != NULL, "Need parallel worker threads.");
 529       int n_workers = workers->total_workers();
 530       StrongRootsScope srs(n_workers);
 531       PMSAdjustRootsTask tsk(&srs, workers);
 532       // Set up for parallel process_strong_roots work.
 533       if (n_workers > 1) {
 534         workers->run_task(&tsk);
 535       } else {
 536         tsk.work(0);
 537       }
 538     }
 539   }
 540 
 541   {
 542     GCTraceTime tm("adjust-weak-roots",
 543                    PrintGC && (Verbose || LogCMSParallelFullGC),
 544                    true, NULL, _gc_tracer->gc_id());
 545     // Now adjust pointers in remaining weak roots.  (All of which should
 546     // have been cleared if they pointed to non-surviving objects.)
 547     gch->gen_process_weak_roots(&adjust_pointer_closure);
 548   }
 549 
 550   {
 551     GCTraceTime tm("adjust-preserved-marks",
 552                    PrintGC && (Verbose || LogCMSParallelFullGC),
 553                    true, NULL, _gc_tracer->gc_id());
 554     adjust_marks();
 555   }
 556 
 557   {
 558     GCTraceTime tm("adjust-heap",
 559                    PrintGC && (Verbose || LogCMSParallelFullGC),
 560                    true, NULL, _gc_tracer->gc_id());
 561     GenAdjustPointersClosure blk;
 562     gch->generation_iterate(&blk, true);
 563   }
 564 }
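Each sub-phase of phase 3 now sits in its own scope so that the GCTraceTime constructed at the top of the block measures exactly that sub-phase, reporting when it is destroyed at the closing brace. A minimal RAII timer with the same shape, in standard C++ rather than the GCTraceTime API:

#include <chrono>
#include <cstdio>

// Scoped timer: reports the enclosing block's elapsed time on destruction,
// the RAII shape the per-sub-phase GCTraceTime blocks above rely on.
struct ScopedTrace {
  const char* name;
  std::chrono::steady_clock::time_point start;
  explicit ScopedTrace(const char* n)
      : name(n), start(std::chrono::steady_clock::now()) {}
  ~ScopedTrace() {
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start).count();
    std::printf("[%s, %lld us]\n", name, us);
  }
};

int main() {
  { ScopedTrace t("adjust-strong-roots"); /* ... adjust roots ... */ }
  { ScopedTrace t("adjust-weak-roots");   /* ... adjust weak roots ... */ }
  return 0;
}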
 565 
 566 class GenCompactClosure: public GenCollectedHeap::GenClosure {
 567 public:
 568   void do_generation(Generation* gen) {
 569     gen->compact();
 570   }
 571 };
 572 
 573 void GenMarkSweep::mark_sweep_phase4() {
 574   // All pointers are now adjusted, move objects accordingly
 575 
 576   // It is imperative that we traverse perm_gen first in phase4. All
 577   // classes must be allocated earlier than their instances, and traversing
 578   // perm_gen first makes sure that all Klass*s have moved to their new
 579   // location before any instance does a dispatch through its klass!
 580 
 581   // The ValidateMarkSweep live oops tracking expects us to traverse spaces
 582   // in the same order in phase2, phase3 and phase4. We don't quite do that
 583   // here (perm_gen first rather than last), so we tell the validate code
 584   // to use a higher index (saved from phase2) when verifying perm_gen.
 585   GenCollectedHeap* gch = GenCollectedHeap::heap();
 586 
 587   GCTraceTime tm("phase 4", PrintGC && (Verbose || LogCMSParallelFullGC),
 588                  true, _gc_timer, _gc_tracer->gc_id());
 589 
 590   GenCompactClosure blk;
 591   gch->generation_iterate(&blk, true);
 592 }
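The phase 4 comment above is the key ordering constraint of the compaction: an object's size is found by dispatching through its klass, so class metadata must already be at its final address before instances are walked. A toy illustration of that dependency, with hypothetical types:

#include <cstdio>

struct ToyKlass { int instance_size_in_words; };

struct ToyObj {
  ToyKlass* klass;
  // Walking the heap needs each object's size, and the size is read through
  // the klass pointer; hence classes must move before their instances.
  int size() const { return klass->instance_size_in_words; }
};

int main() {
  ToyKlass k = { 3 };
  ToyObj obj = { &k };
  std::printf("object size via its klass: %d words\n", obj.size());
  return 0;
}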