src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
Sdiff from the hs-gc-chunked-growablearray webrev; the old version of the file is shown first, followed by the new version.

Old version




  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"

  54 
  55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  56 
  57 // turn it on so that the contents of the young list (scan-only /
  58 // to-be-collected) are printed at "strategic" points before / during
  59 // / after the collection --- this is useful for debugging
  60 #define YOUNG_LIST_VERBOSE 0
  61 // CURRENT STATUS
  62 // This file is under construction.  Search for "FIXME".
  63 
  64 // INVARIANTS/NOTES
  65 //
  66 // All allocation activity covered by the G1CollectedHeap interface is
  67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  68 // and allocate_new_tlab, which are the "entry" points to the
  69 // allocation code from the rest of the JVM.  (Note that this does not
  70 // apply to TLAB allocation, which is not part of this interface: it
  71 // is done by clients of this interface.)
  72 
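The HeapLock discipline described in the note above can be modeled outside HotSpot. The standalone C++ sketch below is illustrative only: ToyHeap and its std::mutex are invented stand-ins for G1CollectedHeap and the VM's Heap_lock, and only the locking discipline (every slow-path allocation serialized through one entry point, TLAB bump allocation left to the caller) mirrors the comment.

#include <cstddef>
#include <mutex>

// Toy model: every slow-path allocation goes through a single entry point
// that holds the heap lock for the duration of the allocation attempt.
class ToyHeap {
  std::mutex _heap_lock;   // stand-in for the VM's Heap_lock
  char*      _top;
  char*      _end;
 public:
  ToyHeap(char* start, char* end) : _top(start), _end(end) {}

  // Serialized entry point, analogous in spirit to mem_allocate() and
  // allocate_new_tlab(); TLAB bump allocation happens in the caller
  // without taking this lock.
  void* mem_allocate(std::size_t bytes) {
    std::lock_guard<std::mutex> lg(_heap_lock);
    if (bytes > static_cast<std::size_t>(_end - _top)) {
      return nullptr;      // the real code would attempt a GC or heap expansion here
    }
    void* result = _top;
    _top += bytes;
    return result;
  }
};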
  73 // Notes on implementation of parallelism in different tasks.


1876 }
1877 
1878 // Public methods.
1879 
1880 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1881 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1882 #endif // _MSC_VER
1883 
1884 
1885 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1886   SharedHeap(policy_),
1887   _g1_policy(policy_),
1888   _dirty_card_queue_set(false),
1889   _into_cset_dirty_card_queue_set(false),
1890   _is_alive_closure_cm(this),
1891   _is_alive_closure_stw(this),
1892   _ref_processor_cm(NULL),
1893   _ref_processor_stw(NULL),
1894   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1895   _bot_shared(NULL),
1896   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1897   _evac_failure_scan_stack(NULL) ,
1898   _mark_in_progress(false),
1899   _cg1r(NULL), _summary_bytes_used(0),
1900   _g1mm(NULL),
1901   _refine_cte_cl(NULL),
1902   _full_collection(false),
1903   _free_list("Master Free List"),
1904   _secondary_free_list("Secondary Free List"),
1905   _old_set("Old Set"),
1906   _humongous_set("Master Humongous Set"),
1907   _free_regions_coming(false),
1908   _young_list(new YoungList(this)),
1909   _gc_time_stamp(0),
1910   _retained_old_gc_alloc_region(NULL),
1911   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1912   _old_plab_stats(OldPLABSize, PLABWeight),
1913   _expand_heap_after_alloc_failure(true),
1914   _surviving_young_words(NULL),
1915   _old_marking_cycles_started(0),
1916   _old_marking_cycles_completed(0),


4198   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4199 
4200   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4201 
4202   if (G1CollectedHeap::use_parallel_gc_threads()) {
4203     set_par_threads();
4204     workers()->run_task(&rsfp_task);
4205     set_par_threads(0);
4206   } else {
4207     rsfp_task.work(0);
4208   }
4209 
4210   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4211 
4212   // Reset the claim values in the regions in the collection set.
4213   reset_cset_heap_region_claim_values();
4214 
4215   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4216 
4217   // Now restore saved marks, if any.
4218   if (_objs_with_preserved_marks != NULL) {
4219     assert(_preserved_marks_of_objs != NULL, "Both or none.");
4220     guarantee(_objs_with_preserved_marks->length() ==
4221               _preserved_marks_of_objs->length(), "Both or none.");
4222     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
4223       oop obj   = _objs_with_preserved_marks->at(i);
4224       markOop m = _preserved_marks_of_objs->at(i);
4225       obj->set_mark(m);
4226     }
4227 
4228     // Delete the preserved marks growable arrays (allocated on the C heap).
4229     delete _objs_with_preserved_marks;
4230     delete _preserved_marks_of_objs;
4231     _objs_with_preserved_marks = NULL;
4232     _preserved_marks_of_objs = NULL;
4233   }
4234 }
4235 
4236 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4237   _evac_failure_scan_stack->push(obj);
4238 }
4239 
4240 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4241   assert(_evac_failure_scan_stack != NULL, "precondition");
4242 
4243   while (_evac_failure_scan_stack->length() > 0) {
4244      oop obj = _evac_failure_scan_stack->pop();
4245      _evac_failure_closure->set_region(heap_region_containing(obj));
4246      obj->oop_iterate_backwards(_evac_failure_closure);
4247   }
4248 }
4249 
4250 oop
4251 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4252                                                oop old) {
4253   assert(obj_in_cs(old),


4296   if (!r->evacuation_failed()) {
4297     r->set_evacuation_failed(true);
4298     _hr_printer.evac_failure(r);
4299   }
4300 
4301   push_on_evac_failure_scan_stack(old);
4302 
4303   if (!_drain_in_progress) {
4304     // prevent recursion in copy_to_survivor_space()
4305     _drain_in_progress = true;
4306     drain_evac_failure_scan_stack();
4307     _drain_in_progress = false;
4308   }
4309 }
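The _drain_in_progress flag above is a guard for a standard work-list pattern: a nested call that discovers another evacuation failure only pushes onto the scan stack, and the single outermost call drains it, so copy_to_survivor_space() never re-enters itself recursively. A minimal standalone C++ sketch of just that pattern, with invented names (Scanner, process, handle_failure) in place of the real G1 types:

#include <vector>

struct Scanner {
  std::vector<int> scan_stack;      // stand-in for _evac_failure_scan_stack
  bool drain_in_progress = false;   // stand-in for _drain_in_progress

  // Visiting one item may discover further failures and call handle_failure()
  // again; such nested calls only push, they never drain.
  void process(int /*item*/) { /* ... may call handle_failure(other) ... */ }

  void handle_failure(int item) {
    scan_stack.push_back(item);
    if (!drain_in_progress) {       // only the outermost call drains
      drain_in_progress = true;
      while (!scan_stack.empty()) {
        int next = scan_stack.back();
        scan_stack.pop_back();
        process(next);
      }
      drain_in_progress = false;
    }
  }
};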
4310 
4311 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4312   assert(evacuation_failed(), "Oversaving!");
4313   // We want to call the "for_promotion_failure" version only in the
4314   // case of a promotion failure.
4315   if (m->must_be_preserved_for_promotion_failure(obj)) {
4316     if (_objs_with_preserved_marks == NULL) {
4317       assert(_preserved_marks_of_objs == NULL, "Both or none.");
4318       _objs_with_preserved_marks =
4319         new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4320       _preserved_marks_of_objs =
4321         new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
4322     }
4323     _objs_with_preserved_marks->push(obj);
4324     _preserved_marks_of_objs->push(m);
4325   }
4326 }
4327 
4328 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4329                                                   size_t word_size) {
4330   if (purpose == GCAllocForSurvived) {
4331     HeapWord* result = survivor_attempt_allocation(word_size);
4332     if (result != NULL) {
4333       return result;
4334     } else {
4335       // Let's try to allocate in the old gen in case we can fit the
4336       // object there.
4337       return old_attempt_allocation(word_size);
4338     }
4339   } else {
4340     assert(purpose ==  GCAllocForTenured, "sanity");
4341     HeapWord* result = old_attempt_allocation(word_size);
4342     if (result != NULL) {
4343       return result;
4344     } else {




  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "utilities/stack.hpp"
  55 
  56 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  57 
  58 // turn it on so that the contents of the young list (scan-only /
  59 // to-be-collected) are printed at "strategic" points before / during
  60 // / after the collection --- this is useful for debugging
  61 #define YOUNG_LIST_VERBOSE 0
  62 // CURRENT STATUS
  63 // This file is under construction.  Search for "FIXME".
  64 
  65 // INVARIANTS/NOTES
  66 //
  67 // All allocation activity covered by the G1CollectedHeap interface is
  68 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  69 // and allocate_new_tlab, which are the "entry" points to the
  70 // allocation code from the rest of the JVM.  (Note that this does not
  71 // apply to TLAB allocation, which is not part of this interface: it
  72 // is done by clients of this interface.)
  73 
  74 // Notes on implementation of parallelism in different tasks.


1877 }
1878 
1879 // Public methods.
1880 
1881 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1882 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1883 #endif // _MSC_VER
1884 
1885 
1886 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1887   SharedHeap(policy_),
1888   _g1_policy(policy_),
1889   _dirty_card_queue_set(false),
1890   _into_cset_dirty_card_queue_set(false),
1891   _is_alive_closure_cm(this),
1892   _is_alive_closure_stw(this),
1893   _ref_processor_cm(NULL),
1894   _ref_processor_stw(NULL),
1895   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1896   _bot_shared(NULL),

1897   _evac_failure_scan_stack(NULL) ,
1898   _mark_in_progress(false),
1899   _cg1r(NULL), _summary_bytes_used(0),
1900   _g1mm(NULL),
1901   _refine_cte_cl(NULL),
1902   _full_collection(false),
1903   _free_list("Master Free List"),
1904   _secondary_free_list("Secondary Free List"),
1905   _old_set("Old Set"),
1906   _humongous_set("Master Humongous Set"),
1907   _free_regions_coming(false),
1908   _young_list(new YoungList(this)),
1909   _gc_time_stamp(0),
1910   _retained_old_gc_alloc_region(NULL),
1911   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1912   _old_plab_stats(OldPLABSize, PLABWeight),
1913   _expand_heap_after_alloc_failure(true),
1914   _surviving_young_words(NULL),
1915   _old_marking_cycles_started(0),
1916   _old_marking_cycles_completed(0),


4198   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4199 
4200   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4201 
4202   if (G1CollectedHeap::use_parallel_gc_threads()) {
4203     set_par_threads();
4204     workers()->run_task(&rsfp_task);
4205     set_par_threads(0);
4206   } else {
4207     rsfp_task.work(0);
4208   }
4209 
4210   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4211 
4212   // Reset the claim values in the regions in the collection set.
4213   reset_cset_heap_region_claim_values();
4214 
4215   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4216 
4217   // Now restore saved marks, if any.
4218   guarantee(_objs_with_preserved_marks.size() ==
4219             _preserved_marks_of_objs.size(), "Both or none.");
4220   while (!_objs_with_preserved_marks.is_empty()) {
4221     oop obj = _objs_with_preserved_marks.pop();
4222     markOop m = _preserved_marks_of_objs.pop();


4223     obj->set_mark(m);
4224   }
4225   _objs_with_preserved_marks.clear(true);
4226   _preserved_marks_of_objs.clear(true);

4227 }
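Compared with the old version of this block (old lines 4217-4233), the pair of lazily allocated C-heap GrowableArrays is gone: the preserved objects and their mark words now live in two stack-like members that are popped in lockstep and cleared once the marks are restored (the added #include "utilities/stack.hpp" suggests HotSpot's segmented Stack template, which grows in chunks rather than by resizing one contiguous array). The standalone C++ sketch below models only that lockstep push/pop bookkeeping with invented names (Obj, PreservedMarks) and std::stack; it is not the HotSpot data structure.

#include <stack>

struct Obj { unsigned mark; };      // stand-in for an oop and its mark word

class PreservedMarks {
  std::stack<Obj*>     _objs;       // stand-in for _objs_with_preserved_marks
  std::stack<unsigned> _marks;      // stand-in for _preserved_marks_of_objs
 public:
  // Called for each object whose mark word must survive evacuation failure;
  // both stacks grow in lockstep, so they always have the same size.
  void preserve(Obj* obj, unsigned mark) {
    _objs.push(obj);
    _marks.push(mark);
  }

  // Called after the pause: pop both stacks together and put the saved
  // header bits back, leaving both containers empty.
  void restore_all() {
    while (!_objs.empty()) {
      Obj*     obj  = _objs.top();  _objs.pop();
      unsigned mark = _marks.top(); _marks.pop();
      obj->mark = mark;
    }
  }
};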
4228 
4229 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4230   _evac_failure_scan_stack->push(obj);
4231 }
4232 
4233 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4234   assert(_evac_failure_scan_stack != NULL, "precondition");
4235 
4236   while (_evac_failure_scan_stack->length() > 0) {
4237      oop obj = _evac_failure_scan_stack->pop();
4238      _evac_failure_closure->set_region(heap_region_containing(obj));
4239      obj->oop_iterate_backwards(_evac_failure_closure);
4240   }
4241 }
4242 
4243 oop
4244 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4245                                                oop old) {
4246   assert(obj_in_cs(old),


4289   if (!r->evacuation_failed()) {
4290     r->set_evacuation_failed(true);
4291     _hr_printer.evac_failure(r);
4292   }
4293 
4294   push_on_evac_failure_scan_stack(old);
4295 
4296   if (!_drain_in_progress) {
4297     // prevent recursion in copy_to_survivor_space()
4298     _drain_in_progress = true;
4299     drain_evac_failure_scan_stack();
4300     _drain_in_progress = false;
4301   }
4302 }
4303 
4304 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4305   assert(evacuation_failed(), "Oversaving!");
4306   // We want to call the "for_promotion_failure" version only in the
4307   // case of a promotion failure.
4308   if (m->must_be_preserved_for_promotion_failure(obj)) {
4309     _objs_with_preserved_marks.push(obj);
4310     _preserved_marks_of_objs.push(m);

4311   }
4312 }
4313 
4314 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4315                                                   size_t word_size) {
4316   if (purpose == GCAllocForSurvived) {
4317     HeapWord* result = survivor_attempt_allocation(word_size);
4318     if (result != NULL) {
4319       return result;
4320     } else {
4321       // Let's try to allocate in the old gen in case we can fit the
4322       // object there.
4323       return old_attempt_allocation(word_size);
4324     }
4325   } else {
4326     assert(purpose ==  GCAllocForTenured, "sanity");
4327     HeapWord* result = old_attempt_allocation(word_size);
4328     if (result != NULL) {
4329       return result;
4330     } else {

