src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6083 : 8029075: String deduplication in G1
Implementation of JEP 192, http://openjdk.java.net/jeps/192

  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  29 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  30 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  31 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  32 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  34 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  35 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  36 #include "gc_implementation/g1/g1EvacFailure.hpp"
  37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  38 #include "gc_implementation/g1/g1Log.hpp"
  39 #include "gc_implementation/g1/g1MarkSweep.hpp"
  40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  42 #include "gc_implementation/g1/g1StringDedup.hpp"
  43 #include "gc_implementation/g1/g1YCTypes.hpp"
  44 #include "gc_implementation/g1/heapRegion.inline.hpp"
  45 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  46 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  47 #include "gc_implementation/g1/vm_operations_g1.hpp"
  48 #include "gc_implementation/shared/gcHeapSummary.hpp"
  49 #include "gc_implementation/shared/gcTimer.hpp"
  50 #include "gc_implementation/shared/gcTrace.hpp"
  51 #include "gc_implementation/shared/gcTraceTime.hpp"
  52 #include "gc_implementation/shared/isGCActiveMark.hpp"
  53 #include "memory/gcLocker.inline.hpp"
  54 #include "memory/generationSpec.hpp"
  55 #include "memory/iterator.hpp"
  56 #include "memory/referenceProcessor.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/oop.pcgc.inline.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "utilities/ticks.hpp"
  61 
  62 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

2165   // G1AllocRegion class. If we don't pass an address in the reserved
2166   // space here, lots of asserts fire.
2167 
2168   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2169                                              _g1_reserved.start());
2170   // We'll re-use the same region whether the alloc region will
2171   // require BOT updates or not and, if it doesn't, then a non-young
2172   // region will complain that it cannot support allocations without
2173   // BOT updates. So we'll tag the dummy region as young to avoid that.
2174   dummy_region->set_young();
2175   // Make sure it's full.
2176   dummy_region->set_top(dummy_region->end());
2177   G1AllocRegion::setup(this, dummy_region);
2178 
2179   init_mutator_alloc_region();
2180 
2181   // Create the monitoring and management support here, so that the
2182   // values it reads from the heap have been properly initialized.
2183   _g1mm = new G1MonitoringSupport(this);
2184 
2185   G1StringDedup::initialize();
2186 
2187   return JNI_OK;
2188 }
2189 
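G1StringDedup::initialize() is the new hook added by this change; its body
lives in g1StringDedup.cpp and is not part of this hunk. A rough,
hypothetical sketch of its shape, assuming only that the machinery is gated
on the UseStringDeduplication flag introduced by JEP 192:

// Hypothetical sketch only, not the actual g1StringDedup.cpp code.
void G1StringDedup::initialize() {
  if (UseStringDeduplication) {
    // Set up the deduplication queue(s) and hashtable, and start the
    // deduplication thread.
  }
}
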
2190 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2191   return HeapRegion::max_region_size();
2192 }
2193 
2194 void G1CollectedHeap::ref_processing_init() {
2195   // Reference processing in G1 currently works as follows:
2196   //
2197   // * There are two reference processor instances. One is
2198   //   used to record and process discovered references
2199   //   during concurrent marking; the other is used to
2200   //   record and process references during STW pauses
2201   //   (both full and incremental).
2202   // * Both ref processors need to 'span' the entire heap as
2203   //   the regions in the collection set may be dotted around.
2204   //
2205   // * For the concurrent marking ref processor:
2206   //   * Reference discovery is enabled at initial marking.

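The hunk is cut off before the setup code itself, but the comment pins down
the key constraint: both reference processors must span the whole heap. A
minimal sketch of that part, assuming the field names _ref_processor_cm and
_ref_processor_stw and eliding the ReferenceProcessor constructor arguments
that this hunk does not show:

// Minimal sketch; constructor arguments elided (not shown in this hunk).
MemRegion mr = reserved_region();                             // entire heap
_ref_processor_cm  = new ReferenceProcessor(mr /* , ... */);  // concurrent marking
_ref_processor_stw = new ReferenceProcessor(mr /* , ... */);  // STW pauses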
3447 
3448       // Checks that the expected amount of parallel work was done.
3449       // The implication is that n_workers is > 0.
3450       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3451              "sanity check");
3452 
3453       reset_heap_region_claim_values();
3454 
3455       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3456              "sanity check");
3457     } else {
3458       VerifyRegionClosure blk(false, vo);
3459       heap_region_iterate(&blk);
3460       if (blk.failures()) {
3461         failures = true;
3462       }
3463     }
3464     if (!silent) gclog_or_tty->print("RemSet ");
3465     rem_set()->verify();
3466 
3467     if (G1StringDedup::is_enabled()) {
3468       if (!silent) gclog_or_tty->print("StrDedup ");
3469       G1StringDedup::verify();
3470     }
3471 
3472     if (failures) {
3473       gclog_or_tty->print_cr("Heap:");
3474       // It helps to have the per-region information in the output when
3475       // tracking down what went wrong. This is why we call
3476       // print_extended_on() instead of print_on().
3477       print_extended_on(gclog_or_tty);
3478       gclog_or_tty->print_cr("");
3479 #ifndef PRODUCT
3480       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3481         concurrent_mark()->print_reachable("at-verification-failure",
3482                                            vo, false /* all */);
3483       }
3484 #endif
3485       gclog_or_tty->flush();
3486     }
3487     guarantee(!failures, "there should not have been any failures");
3488   } else {
3489     if (!silent) {
3490       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3491       if (G1StringDedup::is_enabled()) {
3492         gclog_or_tty->print(", StrDedup");
3493       }
3494       gclog_or_tty->print(") ");
3495     }
3496   }
3497 }
3498 
3499 void G1CollectedHeap::verify(bool silent) {
3500   verify(silent, VerifyOption_G1UsePrevMarking);
3501 }
3502 
3503 double G1CollectedHeap::verify(bool guard, const char* msg) {
3504   double verify_time_ms = 0.0;
3505 
3506   if (guard && total_collections() >= VerifyGCStartAt) {
3507     double verify_start = os::elapsedTime();
3508     HandleMark hm;  // Discard invalid handles created during verification
3509     prepare_for_verify();
3510     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3511     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3512   }
3513 
3514   return verify_time_ms;
3515 }
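
For context, a plausible call site for this timed helper, assuming the
VerifyBeforeGC flag and a hypothetical phase-times recorder name (the real
caller is outside this hunk):

// Hypothetical usage; record_verify_before_time_ms is an assumed name.
double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);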

3568   heap_region_iterate(&blk);
3569 }
3570 
3571 void G1CollectedHeap::print_on_error(outputStream* st) const {
3572   this->CollectedHeap::print_on_error(st);
3573 
3574   if (_cm != NULL) {
3575     st->cr();
3576     _cm->print_on_error(st);
3577   }
3578 }
3579 
3580 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3581   if (G1CollectedHeap::use_parallel_gc_threads()) {
3582     workers()->print_worker_threads_on(st);
3583   }
3584   _cmThread->print_on(st);
3585   st->cr();
3586   _cm->print_worker_threads_on(st);
3587   _cg1r->print_worker_threads_on(st);
3588   if (G1StringDedup::is_enabled()) {
3589     G1StringDedup::print_worker_threads_on(st);
3590   }
3591 }
3592 
3593 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3594   if (G1CollectedHeap::use_parallel_gc_threads()) {
3595     workers()->threads_do(tc);
3596   }
3597   tc->do_thread(_cmThread);
3598   _cg1r->threads_do(tc);
3599   if (G1StringDedup::is_enabled()) {
3600     G1StringDedup::threads_do(tc);
3601   }
3602 }
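
gc_threads_do() applies a ThreadClosure to every GC thread, which after this
change includes the string deduplication thread when the feature is enabled.
To illustrate the closure contract (CountGCThreadsClosure is a made-up
example; ThreadClosure::do_thread() is the real hook):

// Illustration only: count the GC threads visited by gc_threads_do().
class CountGCThreadsClosure : public ThreadClosure {
  uint _count;
public:
  CountGCThreadsClosure() : _count(0) {}
  virtual void do_thread(Thread* t) { _count++; }
  uint count() const { return _count; }
};
// Usage: CountGCThreadsClosure cl; g1h->gc_threads_do(&cl);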
3603 
3604 void G1CollectedHeap::print_tracing_info() const {
3605   // We'll overload this to mean "trace GC pause statistics."
3606   if (TraceGen0Time || TraceGen1Time) {
3607     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3608     // to that.
3609     g1_policy()->print_tracing_info();
3610   }
3611   if (G1SummarizeRSetStats) {
3612     g1_rem_set()->print_summary_info();
3613   }
3614   if (G1SummarizeConcMark) {
3615     concurrent_mark()->print_summary_info();
3616   }
3617   g1_policy()->print_yg_surv_rate_info();
3618   SpecializationStats::print();
3619 }
3620 
3621 #ifndef PRODUCT

4760       // age on the mark word, when the object does not have a
4761       // displaced mark word. We're not expecting many objects to have
4762       // a displaced mark word, so that case is not optimized
4763       // further (it could be...) and we simply call obj->incr_age().
4764 
4765       if (m->has_displaced_mark_helper()) {
4766         // in this case, we have to install the mark word first,
4767         // otherwise obj looks to be forwarded (the old mark word,
4768         // which contains the forward pointer, was copied)
4769         obj->set_mark(m);
4770         obj->incr_age();
4771       } else {
4772         m = m->incr_age();
4773         obj->set_mark(m);
4774       }
4775       age_table()->add(obj, word_sz);
4776     } else {
4777       obj->set_mark(m);
4778     }
4779 
4780     if (G1StringDedup::is_enabled()) {
4781       G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
4782                                              to_region->is_young(),
4783                                              queue_num(),
4784                                              obj);
4785     }
4786 
4787     size_t* surv_young_words = surviving_young_words();
4788     surv_young_words[young_index] += word_sz;
4789 
4790     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4791       // We keep track of the next start index in the length field of
4792       // the to-space object. The actual length can be found in the
4793       // length field of the from-space object.
4794       arrayOop(obj)->set_length(0);
4795       oop* old_p = set_partial_array_mask(old);
4796       push_on_queue(old_p);
4797     } else {
4798       // No point in using the slower heap_region_containing() method,
4799       // given that we know obj is in the heap.
4800       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4801       obj->oop_iterate_backwards(&_scanner);
4802     }
4803   } else {
4804     undo_allocation(alloc_purpose, obj_ptr, word_sz);
4805     obj = forward_ptr;
4806   }
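
The G1StringDedup::enqueue_from_evacuation() call above is the per-object
deduplication hook added by this change: as each object is copied, it may be
queued for deduplication. The candidate test itself is not shown in this
hunk; below is a hedged sketch of the kind of filter JEP 192 describes,
where is_dedup_candidate and the exact aging rule are assumptions (only
java_lang_String::is_instance() and StringDeduplicationAgeThreshold are
known names):

// Hypothetical sketch in the spirit of JEP 192, not the actual
// G1StringDedup logic.
static bool is_dedup_candidate(bool from_young, bool to_young, oop obj) {
  if (!java_lang_String::is_instance(obj)) {
    return false;                 // only java.lang.String instances
  }
  if (from_young && !to_young) {
    return true;                  // being promoted: last chance to dedup
  }
  // Still young: wait until the object has survived enough evacuations.
  return from_young && obj->mark()->age() == StringDeduplicationAgeThreshold;
}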

5283 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5284                                                      bool process_strings, bool process_symbols) {
5285   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5286                    _g1h->workers()->active_workers() : 1);
5287 
5288   G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5289   if (G1CollectedHeap::use_parallel_gc_threads()) {
5290     set_par_threads(n_workers);
5291     workers()->run_task(&g1_unlink_task);
5292     set_par_threads(0);
5293   } else {
5294     g1_unlink_task.work(0);
5295   }
5296   if (G1TraceStringSymbolTableScrubbing) {
5297     gclog_or_tty->print_cr("Cleaned string and symbol table, "
5298                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5299                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5300                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5301                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5302   }
5303 
5304   if (G1StringDedup::is_enabled()) {
5305     G1StringDedup::unlink(is_alive);
5306   }
5307 }
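
The dispatch pattern above (set_par_threads(n), run_task(), then
set_par_threads(0), with a serial work(0) fallback) is the standard way G1
fans work out to its worker gang. A minimal sketch of the same idiom with a
hypothetical task, assuming the AbstractGangTask base class and its
per-worker work(uint) entry point:

// Minimal sketch of the gang-task idiom; ExampleTask is hypothetical.
class ExampleTask : public AbstractGangTask {
public:
  ExampleTask() : AbstractGangTask("Example Task") {}
  virtual void work(uint worker_id) {
    // Each worker claims and processes its share of the work here.
  }
};

ExampleTask task;
if (G1CollectedHeap::use_parallel_gc_threads()) {
  set_par_threads(workers()->active_workers());
  workers()->run_task(&task);
  set_par_threads(0);
} else {
  task.work(0);   // serial fallback on "worker" 0
}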
5308 
5309 // Weak Reference Processing support
5310 
5311 // An always "is_alive" closure that is used to preserve referents.
5312 // If the object is non-null then it's alive.  Used in the preservation
5313 // of referent objects that are pointed to by reference objects
5314 // discovered by the CM ref processor.
5315 class G1AlwaysAliveClosure: public BoolObjectClosure {
5316   G1CollectedHeap* _g1;
5317 public:
5318   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5319   bool do_object_b(oop p) {
5320     if (p != NULL) {
5321       return true;
5322     }
5323     return false;
5324   }
5325 };
5326 

5900   g1_policy()->phase_times()->record_par_time(par_time_ms);
5901 
5902   double code_root_fixup_time_ms =
5903         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5904   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5905 
5906   set_par_threads(0);
5907 
5908   // Process any discovered reference objects - we have
5909   // to do this _before_ we retire the GC alloc regions
5910   // as we may have to copy some 'reachable' referent
5911   // objects (and their reachable sub-graphs) that were
5912   // not copied during the pause.
5913   process_discovered_references(n_workers);
5914 
5915   // Weak root processing.
5916   {
5917     G1STWIsAliveClosure is_alive(this);
5918     G1KeepAliveClosure keep_alive(this);
5919     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5920     if (G1StringDedup::is_enabled()) {
5921       G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5922     }
5923   }
5924 
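The is_alive/keep_alive pair above expresses a uniform contract for weak
slots, and the new G1StringDedup::unlink_or_oops_do() call plugs the
deduplication table into it alongside the JNI weak handles: live referents
get their slots updated to the evacuated copies, dead referents get their
slots cleared. A hypothetical helper showing that contract
(process_weak_slot is illustrative, not a JDK function):

// Illustration of the is_alive/keep_alive contract;
// BoolObjectClosure::do_object_b() and OopClosure::do_oop() are the
// real hooks.
static void process_weak_slot(oop* slot,
                              BoolObjectClosure* is_alive,
                              OopClosure* keep_alive) {
  oop obj = *slot;
  if (obj == NULL) {
    return;                     // already cleared
  }
  if (is_alive->do_object_b(obj)) {
    keep_alive->do_oop(slot);   // may update *slot to the forwarded copy
  } else {
    *slot = NULL;               // referent is dead: clear the weak slot
  }
}
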
5925   release_gc_alloc_regions(n_workers, evacuation_info);
5926   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5927 
5928   // Reset and re-enable the hot card cache.
5929   // Note the counts for the cards in the regions in the
5930   // collection set are reset when the collection set is freed.
5931   hot_card_cache->reset_hot_cache();
5932   hot_card_cache->set_use_cache(true);
5933 
5934   // Migrate the strong code roots attached to each region in
5935   // the collection set. Ideally we would like to do this
5936   // after we have finished the scanning/evacuation of the
5937   // strong code roots for a particular heap region.
5938   migrate_strong_code_roots();
5939 
5940   if (g1_policy()->during_initial_mark_pause()) {
5941     // Reset the claim values set during marking the strong code roots
5942     reset_heap_region_claim_values();

6386       // humongous region set
6387     } else {
6388       // The rest should be old
6389       _old_set->remove(r);
6390     }
6391     return false;
6392   }
6393 
6394   ~TearDownRegionSetsClosure() {
6395     assert(_old_set->is_empty(), "post-condition");
6396   }
6397 };
6398 
6399 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6400   assert_at_safepoint(true /* should_be_vm_thread */);
6401 
6402   if (!free_list_only) {
6403     TearDownRegionSetsClosure cl(&_old_set);
6404     heap_region_iterate(&cl);
6405 
6406     // Note that emptying the _young_list is postponed and instead done as
6407   // the first step when rebuilding the region sets again. The reason for
6408     // this is that during a full GC string deduplication needs to know if
6409     // a collected region was young or old when the full GC was initiated.
6410   }
6411   _free_list.remove_all();
6412 }
6413 
6414 class RebuildRegionSetsClosure : public HeapRegionClosure {
6415 private:
6416   bool            _free_list_only;
6417   HeapRegionSet*   _old_set;
6418   FreeRegionList* _free_list;
6419   size_t          _total_used;
6420 
6421 public:
6422   RebuildRegionSetsClosure(bool free_list_only,
6423                            HeapRegionSet* old_set, FreeRegionList* free_list) :
6424     _free_list_only(free_list_only),
6425     _old_set(old_set), _free_list(free_list), _total_used(0) {
6426     assert(_free_list->is_empty(), "pre-condition");
6427     if (!free_list_only) {
6428       assert(_old_set->is_empty(), "pre-condition");
6429     }

6443       if (r->isHumongous()) {
6444         // We ignore humongous regions; the humongous set was left unchanged.
6445       } else {
6446         // The rest should be old; add them to the old set.
6447         _old_set->add(r);
6448       }
6449       _total_used += r->used();
6450     }
6451 
6452     return false;
6453   }
6454 
6455   size_t total_used() {
6456     return _total_used;
6457   }
6458 };
6459 
6460 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6461   assert_at_safepoint(true /* should_be_vm_thread */);
6462 
6463   if (!free_list_only) {
6464     _young_list->empty_list();
6465   }
6466 
6467   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6468   heap_region_iterate(&cl);
6469 
6470   if (!free_list_only) {
6471     _summary_bytes_used = cl.total_used();
6472   }
6473   assert(_summary_bytes_used == recalculate_used(),
6474          err_msg("inconsistent _summary_bytes_used, "
6475                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6476                  _summary_bytes_used, recalculate_used()));
6477 }
6478 
6479 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6480   _refine_cte_cl->set_concurrent(concurrent);
6481 }
6482 
6483 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6484   HeapRegion* hr = heap_region_containing(p);
6485   if (hr == NULL) {
6486     return false;