< prev index next >

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page
rev 7471 : 8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by:
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 7472 : [mq]: 8060025-mikael-review1
rev 7473 : imported patch mikael-refactor-cset-state
rev 7474 : [mq]: kim-review


3802 
3803 #if YOUNG_LIST_VERBOSE
3804         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3805         _young_list->print();
3806 #endif // YOUNG_LIST_VERBOSE
3807 
3808         if (g1_policy()->during_initial_mark_pause()) {
3809           concurrent_mark()->checkpointRootsInitialPre();
3810         }
3811 
3812 #if YOUNG_LIST_VERBOSE
3813         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3814         _young_list->print();
3815         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3816 #endif // YOUNG_LIST_VERBOSE
3817 
3818         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3819 
3820         register_humongous_regions_with_in_cset_fast_test();
3821 


3822         _cm->note_start_of_gc();
3823         // We should not verify the per-thread SATB buffers given that
3824         // we have not filtered them yet (we'll do so during the
3825         // GC). We also call this after finalize_cset() to
3826         // ensure that the CSet has been finalized.
3827         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3828                                  true  /* verify_enqueued_buffers */,
3829                                  false /* verify_thread_buffers */,
3830                                  true  /* verify_fingers */);
3831 
3832         if (_hr_printer.is_active()) {
3833           HeapRegion* hr = g1_policy()->collection_set();
3834           while (hr != NULL) {
3835             _hr_printer.cset(hr);
3836             hr = hr->next_in_collection_set();
3837           }
3838         }
3839 
3840 #ifdef ASSERT
3841         VerifyCSetClosure cl;


4031     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4032   }
4033   // It should now be safe to tell the concurrent mark thread to start
4034   // without its logging output interfering with the logging output
4035   // that came from the pause.
4036 
4037   if (should_start_conc_mark) {
4038     // CAUTION: after the doConcurrentMark() call below,
4039     // the concurrent marking thread(s) could be running
4040     // concurrently with us. Make sure that anything after
4041     // this point does not assume that we are the only GC thread
4042     // running. Note: of course, the actual marking work will
4043     // not start until the safepoint itself is released in
4044     // SuspendibleThreadSet::desynchronize().
4045     doConcurrentMark();
4046   }
4047 
4048   return true;
4049 }
4050 
// Returns the desired PLAB (promotion local allocation buffer) size in
// HeapWords for the given allocation purpose, based on the matching PLAB
// statistics and capped so that a PLAB itself can never be humongous.
4051 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4052 {
4053   size_t gclab_word_size;
4054   switch (purpose) {
4055     case GCAllocForSurvived:
4056       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
4057       break;
4058     case GCAllocForTenured:
4059       gclab_word_size = _old_plab_stats.desired_plab_sz();
4060       break;
4061     default:
       // Unknown purpose: assert in debug builds; fall back to the
       // old-gen sizing in product builds.
4062       assert(false, "unknown GCAllocPurpose");
4063       gclab_word_size = _old_plab_stats.desired_plab_sz();
4064       break;
4065   }
4066 
4067   // Prevent humongous PLAB sizes for two reasons:
4068   // * PLABs are allocated using a similar paths as oops, but should
4069   //   never be in a humongous region
4070   // * Allowing humongous PLABs needlessly churns the region free lists
4071   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4072 }
4073 
// Set up the state needed to handle evacuation failures: remember the
// closure used to re-scan self-forwarded objects and allocate the
// C-heap-backed scan stack (initial capacity 40 oops).
4074 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4075   _drain_in_progress = false;
4076   set_evac_failure_closure(cl);
4077   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4078 }
4079 
// Tear down the evacuation-failure state. The scan stack must exist and
// be fully drained, and no drain may still be in progress.
4080 void G1CollectedHeap::finalize_for_evac_failure() {
4081   assert(_evac_failure_scan_stack != NULL &&
4082          _evac_failure_scan_stack->length() == 0,
4083          "Postcondition");
4084   assert(!_drain_in_progress, "Postcondition");
4085   delete _evac_failure_scan_stack;
4086   _evac_failure_scan_stack = NULL;
4087 }
4088 
4089 void G1CollectedHeap::remove_self_forwarding_pointers() {
4090   double remove_self_forwards_start = os::elapsedTime();
4091 
4092   set_par_threads();
4093   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);


4179   push_on_evac_failure_scan_stack(old);
4180 
4181   if (!_drain_in_progress) {
4182     // prevent recursion in copy_to_survivor_space()
4183     _drain_in_progress = true;
4184     drain_evac_failure_scan_stack();
4185     _drain_in_progress = false;
4186   }
4187 }
4188 
// If the mark word of obj cannot be reconstructed after a promotion
// failure, save obj and its mark on the parallel preservation stacks
// (_objs_with_preserved_marks / _preserved_marks_of_objs) so the mark can
// be restored later.
4189 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4190   assert(evacuation_failed(), "Oversaving!");
4191   // We want to call the "for_promotion_failure" version only in the
4192   // case of a promotion failure.
4193   if (m->must_be_preserved_for_promotion_failure(obj)) {
4194     _objs_with_preserved_marks.push(obj);
4195     _preserved_marks_of_objs.push(m);
4196   }
4197 }
4198 
// GC-time allocation of word_size words for the given purpose: try the
// preferred space first (survivor for GCAllocForSurvived, old for
// GCAllocForTenured) and fall back to the other space if that fails.
// Returns NULL only when both attempts fail.
4199 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4200                                                   size_t word_size,
4201                                                   AllocationContext_t context) {
4202   if (purpose == GCAllocForSurvived) {
4203     HeapWord* result = survivor_attempt_allocation(word_size, context);
4204     if (result != NULL) {
4205       return result;
4206     } else {
4207       // Let's try to allocate in the old gen in case we can fit the
4208       // object there.
4209       return old_attempt_allocation(word_size, context);
4210     }
4211   } else {
4212     assert(purpose ==  GCAllocForTenured, "sanity");
4213     HeapWord* result = old_attempt_allocation(word_size, context);
4214     if (result != NULL) {
4215       return result;
4216     } else {
4217       // Let's try to allocate in the survivors in case we can fit the
4218       // object there.
4219       return survivor_attempt_allocation(word_size, context);
4220     }
4221   }
4222 
       // Both branches above return, so this is unreachable; it is kept
       // only to placate compilers that require a trailing return.
4223   ShouldNotReachHere();
4224   // Trying to keep some compilers happy.
4225   return NULL;
4226 }
4227 
// Mark an object that is not being moved (it must be outside the CSet)
// as live via the concurrent marking grayRoot protocol.
4228 void G1ParCopyHelper::mark_object(oop obj) {
4229   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4230 
4231   // We know that the object is not moving so it's safe to read its size.
4232   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4233 }
4234 
4235 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4236   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4237   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4238   assert(from_obj != to_obj, "should not be self-forwarded");
4239 
4240   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4241   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4242 
4243   // The object might be in the process of being copied by another
4244   // worker so we cannot trust that its to-space image is
4245   // well-formed. So we have to read its size from its from-space
4246   // image which we know should not be changing.
4247   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);


// Klass barrier: if the (possibly just-copied) object lives in a young
// region, record on the klass currently being scanned that it holds
// modified oops.
4250 template <class T>
4251 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4252   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4253     _scanned_klass->record_modified_oops();
4254   }
4255 }
4256 
// Core evacuation copy closure (pre-InCSetState version). For a reference
// into the collection set the referent is forwarded (copied by this worker
// if no other worker has done so) and the reference at p is updated.
// Humongous referents are flagged live; during initial-mark root scanning,
// non-CSet referents are marked. The template parameters select the
// klass/evac barrier and marking behavior at compile time.
4257 template <G1Barrier barrier, G1Mark do_mark_object>
4258 template <class T>
4259 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4260   T heap_oop = oopDesc::load_heap_oop(p);
4261 
4262   if (oopDesc::is_null(heap_oop)) {
4263     return;
4264   }
4265 
4266   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4267 
4268   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4269 
4270   G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
4271 
4272   if (state == G1CollectedHeap::InCSet) {
4273     oop forwardee;
4274     markOop m = obj->mark();
4275     if (m->is_marked()) {
       // A marked mark word here encodes a forwarding pointer: another
       // worker (or an earlier visit) already copied this object.
4276       forwardee = (oop) m->decode_pointer();
4277     } else {
4278       forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
4279     }
4280     assert(forwardee != NULL, "forwardee should not be NULL");
4281     oopDesc::encode_store_heap_oop(p, forwardee);
4282     if (do_mark_object != G1MarkNone && forwardee != obj) {
4283       // If the object is self-forwarded we don't need to explicitly
4284       // mark it, the evacuation failure protocol will do so.
4285       mark_forwarded_object(obj, forwardee);
4286     }
4287 
4288     if (barrier == G1BarrierKlass) {
4289       do_klass_barrier(p, forwardee);
4290     }
4291   } else {
4292     if (state == G1CollectedHeap::IsHumongous) {
4293       _g1->set_humongous_is_live(obj);
4294     }
4295     // The object is not in collection set. If we're a root scanning
4296     // closure during an initial mark pause then attempt to mark the object.
4297     if (do_mark_object == G1MarkFromRoot) {
4298       mark_object(obj);
4299     }
4300   }
4301 
4302   if (barrier == G1BarrierEvac) {
4303     _par_scan_state->update_rs(_from, p, _worker_id);
4304   }
4305 }
4306 
4307 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4308 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4309 
4310 class G1ParEvacuateFollowersClosure : public VoidClosure {
4311 protected:
4312   G1CollectedHeap*              _g1h;


5128     return false;
5129   }
5130 };
5131 
// Liveness predicate for the STW pause: an object is considered live
// (reachable) if it is outside the collection set, or inside it but
// already forwarded (i.e. it was copied).
5132 bool G1STWIsAliveClosure::do_object_b(oop p) {
5133   // An object is reachable if it is outside the collection set,
5134   // or is inside and copied.
5135   return !_g1->obj_in_cs(p) || p->is_forwarded();
5136 }
5137 
5138 // Non Copying Keep Alive closure
     // Fixes up references after evacuation without copying anything:
     // InCSet referents must already be forwarded (the reference is
     // redirected), humongous referents are flagged live, and everything
     // else is left untouched. Only the oop* overload is expected.
5139 class G1KeepAliveClosure: public OopClosure {
5140   G1CollectedHeap* _g1;
5141 public:
5142   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5143   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5144   void do_oop(oop* p) {
5145     oop obj = *p;
5146     assert(obj != NULL, "the caller should have filtered out NULL values");
5147 
5148     G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
       // InNeither: neither in the CSet nor humongous - nothing to do.
5149     if (cset_state == G1CollectedHeap::InNeither) {
5150       return;
5151     }
5152     if (cset_state == G1CollectedHeap::InCSet) {
5153       assert( obj->is_forwarded(), "invariant" );
5154       *p = obj->forwardee();
5155     } else {
5156       assert(!obj->is_forwarded(), "invariant" );
5157       assert(cset_state == G1CollectedHeap::IsHumongous,
5158              err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
5159       _g1->set_humongous_is_live(obj);
5160     }
5161   }
5162 };
5163 
5164 // Copying Keep Alive closure - can be called from both
5165 // serial and parallel code as long as different worker
5166 // threads utilize different G1ParScanThreadState instances
5167 // and different queues.
5168 
5169 class G1CopyingKeepAliveClosure: public OopClosure {
5170   G1CollectedHeap*         _g1h;
5171   OopClosure*              _copy_non_heap_obj_cl;
5172   G1ParScanThreadState*    _par_scan_state;
5173 
5174 public:
5175   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5176                             OopClosure* non_heap_obj_cl,
5177                             G1ParScanThreadState* pss):
5178     _g1h(g1h),


5934   bool failures() { return _failures; }
5935 
       // Verify the mark bitmaps for one region (skipping continuation
       // humongous regions); records any failure and always returns false
       // so the heap iteration continues.
5936   virtual bool doHeapRegion(HeapRegion* hr) {
5937     if (hr->is_continues_humongous()) return false;
5938 
5939     bool result = _g1h->verify_bitmaps(_caller, hr);
5940     if (!result) {
5941       _failures = true;
5942     }
5943     return false;
5944   }
5945 };
5946 
// Debug aid: when G1VerifyBitmaps is enabled, verify the mark bitmaps of
// every heap region and abort (guarantee) on any failure.
5947 void G1CollectedHeap::check_bitmaps(const char* caller) {
5948   if (!G1VerifyBitmaps) return;
5949 
5950   G1VerifyBitmapClosure cl(caller, this);
5951   heap_region_iterate(&cl);
5952   guarantee(!cl.failures(), "bitmap verification");
5953 }




















































5954 #endif // PRODUCT
5955 
5956 void G1CollectedHeap::cleanUpCardTable() {
5957   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5958   double start = os::elapsedTime();
5959 
5960   {
5961     // Iterate over the dirty cards region list.
5962     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5963 
5964     set_par_threads();
5965     workers()->run_task(&cleanup_task);
5966     set_par_threads(0);
5967 #ifndef PRODUCT
5968     if (G1VerifyCTCleanup || VerifyAfterGC) {
5969       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5970       heap_region_iterate(&cleanup_verifier);
5971     }
5972 #endif
5973   }


6502 
// Propagate the work gang's current active worker count to the heap's
// parallel-thread count without changing the gang itself. A zero count
// indicates a missing prior setup; product builds recover by falling
// back to ParallelGCThreads.
6503 void G1CollectedHeap::set_par_threads() {
6504   // Don't change the number of workers.  Use the value previously set
6505   // in the workgroup.
6506   uint n_workers = workers()->active_workers();
6507   assert(UseDynamicNumberOfGCThreads ||
6508            n_workers == workers()->total_workers(),
6509       "Otherwise should be using the total number of workers");
6510   if (n_workers == 0) {
6511     assert(false, "Should have been set in prior evacuation pause.");
6512     n_workers = ParallelGCThreads;
6513     workers()->set_active_workers(n_workers);
6514   }
6515   set_par_threads(n_workers);
6516 }
6517 
6518 // Methods for the GC alloc regions
6519 
// Allocate a new GC alloc region (survivor or old, selected by purpose)
// provided the policy's per-purpose region limit has not been reached.
// Caller must hold FreeList_lock. Returns NULL if the limit was hit or no
// region could be obtained.
6520 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6521                                                  uint count,
6522                                                  GCAllocPurpose ap) {
6523   assert(FreeList_lock->owned_by_self(), "pre-condition");
6524 
6525   if (count < g1_policy()->max_regions(ap)) {
6526     bool survivor = (ap == GCAllocForSurvived);
6527     HeapRegion* new_alloc_region = new_region(word_size,
6528                                               !survivor,
6529                                               true /* do_expand */);
6530     if (new_alloc_region != NULL) {
6531       // We really only need to do this for old regions given that we
6532       // should never scan survivors. But it doesn't hurt to do it
6533       // for survivors too.
6534       new_alloc_region->record_timestamp();
6535       if (survivor) {
6536         new_alloc_region->set_survivor();
6537         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6538         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6539       } else {
6540         new_alloc_region->set_old();
6541         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6542         check_bitmaps("Old Region Allocation", new_alloc_region);
6543       }
6544       bool during_im = g1_policy()->during_initial_mark_pause();
6545       new_alloc_region->note_start_of_copying(during_im);
6546       return new_alloc_region;
6547     } else {
       // Could not obtain a region: tell the policy the limit for this
       // purpose has effectively been reached.
6548       g1_policy()->note_alloc_region_limit_reached(ap);
6549     }
6550   }
6551   return NULL;
6552 }
6553 
// Retire a filled GC alloc region: note the end of copying, account the
// bytes copied, and hand the region to the survivor young list or the
// old region set depending on its purpose.
6554 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6555                                              size_t allocated_bytes,
6556                                              GCAllocPurpose ap) {
6557   bool during_im = g1_policy()->during_initial_mark_pause();
6558   alloc_region->note_end_of_copying(during_im);
6559   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6560   if (ap == GCAllocForSurvived) {
6561     young_list()->add_survivor_region(alloc_region);
6562   } else {
6563     _old_set.add(alloc_region);
6564   }
6565   _hr_printer.retire(alloc_region);
6566 }
6567 
6568 // Heap region set verification
6569 
6570 class VerifyRegionListsClosure : public HeapRegionClosure {
6571 private:
6572   HeapRegionSet*   _old_set;
6573   HeapRegionSet*   _humongous_set;
6574   HeapRegionManager*   _hrm;
6575 
6576 public:
6577   HeapRegionSetCount _old_count;
6578   HeapRegionSetCount _humongous_count;
6579   HeapRegionSetCount _free_count;
6580 




3802 
3803 #if YOUNG_LIST_VERBOSE
3804         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3805         _young_list->print();
3806 #endif // YOUNG_LIST_VERBOSE
3807 
3808         if (g1_policy()->during_initial_mark_pause()) {
3809           concurrent_mark()->checkpointRootsInitialPre();
3810         }
3811 
3812 #if YOUNG_LIST_VERBOSE
3813         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3814         _young_list->print();
3815         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3816 #endif // YOUNG_LIST_VERBOSE
3817 
3818         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3819 
3820         register_humongous_regions_with_in_cset_fast_test();
3821 
3822         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3823 
3824         _cm->note_start_of_gc();
3825         // We should not verify the per-thread SATB buffers given that
3826         // we have not filtered them yet (we'll do so during the
3827         // GC). We also call this after finalize_cset() to
3828         // ensure that the CSet has been finalized.
3829         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3830                                  true  /* verify_enqueued_buffers */,
3831                                  false /* verify_thread_buffers */,
3832                                  true  /* verify_fingers */);
3833 
3834         if (_hr_printer.is_active()) {
3835           HeapRegion* hr = g1_policy()->collection_set();
3836           while (hr != NULL) {
3837             _hr_printer.cset(hr);
3838             hr = hr->next_in_collection_set();
3839           }
3840         }
3841 
3842 #ifdef ASSERT
3843         VerifyCSetClosure cl;


4033     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4034   }
4035   // It should now be safe to tell the concurrent mark thread to start
4036   // without its logging output interfering with the logging output
4037   // that came from the pause.
4038 
4039   if (should_start_conc_mark) {
4040     // CAUTION: after the doConcurrentMark() call below,
4041     // the concurrent marking thread(s) could be running
4042     // concurrently with us. Make sure that anything after
4043     // this point does not assume that we are the only GC thread
4044     // running. Note: of course, the actual marking work will
4045     // not start until the safepoint itself is released in
4046     // SuspendibleThreadSet::desynchronize().
4047     doConcurrentMark();
4048   }
4049 
4050   return true;
4051 }
4052 























// Set up the state needed to handle evacuation failures: remember the
// closure used to re-scan self-forwarded objects and allocate the
// C-heap-backed scan stack (initial capacity 40 oops).
4053 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4054   _drain_in_progress = false;
4055   set_evac_failure_closure(cl);
4056   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4057 }
4058 
// Tear down the evacuation-failure state. The scan stack must exist and
// be fully drained, and no drain may still be in progress.
4059 void G1CollectedHeap::finalize_for_evac_failure() {
4060   assert(_evac_failure_scan_stack != NULL &&
4061          _evac_failure_scan_stack->length() == 0,
4062          "Postcondition");
4063   assert(!_drain_in_progress, "Postcondition");
4064   delete _evac_failure_scan_stack;
4065   _evac_failure_scan_stack = NULL;
4066 }
4067 
4068 void G1CollectedHeap::remove_self_forwarding_pointers() {
4069   double remove_self_forwards_start = os::elapsedTime();
4070 
4071   set_par_threads();
4072   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);


4158   push_on_evac_failure_scan_stack(old);
4159 
4160   if (!_drain_in_progress) {
4161     // prevent recursion in copy_to_survivor_space()
4162     _drain_in_progress = true;
4163     drain_evac_failure_scan_stack();
4164     _drain_in_progress = false;
4165   }
4166 }
4167 
// If the mark word of obj cannot be reconstructed after a promotion
// failure, save obj and its mark on the parallel preservation stacks so
// the mark can be restored later.
4168 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4169   assert(evacuation_failed(), "Oversaving!");
4170   // We want to call the "for_promotion_failure" version only in the
4171   // case of a promotion failure.
4172   if (m->must_be_preserved_for_promotion_failure(obj)) {
4173     _objs_with_preserved_marks.push(obj);
4174     _preserved_marks_of_objs.push(m);
4175   }
4176 }
4177 





























// Mark an object that is not being moved (it must be outside the CSet)
// as live via the concurrent marking grayRoot protocol.
4178 void G1ParCopyHelper::mark_object(oop obj) {
4179   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4180 
4181   // We know that the object is not moving so it's safe to read its size.
4182   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4183 }
4184 
4185 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4186   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4187   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4188   assert(from_obj != to_obj, "should not be self-forwarded");
4189 
4190   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4191   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4192 
4193   // The object might be in the process of being copied by another
4194   // worker so we cannot trust that its to-space image is
4195   // well-formed. So we have to read its size from its from-space
4196   // image which we know should not be changing.
4197   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);


// Klass barrier: if the (possibly just-copied) object lives in a young
// region, record on the klass currently being scanned that it holds
// modified oops.
4200 template <class T>
4201 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4202   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4203     _scanned_klass->record_modified_oops();
4204   }
4205 }
4206 
// Core evacuation copy closure (InCSetState version). For a reference
// into the collection set the referent is forwarded (copied by this
// worker if no other worker has done so yet - note that the CSet state is
// now passed to copy_to_survivor_space) and the reference at p is
// updated. Humongous referents are flagged live; during initial-mark root
// scanning, non-CSet referents are marked.
4207 template <G1Barrier barrier, G1Mark do_mark_object>
4208 template <class T>
4209 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4210   T heap_oop = oopDesc::load_heap_oop(p);
4211 
4212   if (oopDesc::is_null(heap_oop)) {
4213     return;
4214   }
4215 
4216   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4217 
4218   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4219 
4220   const InCSetState state = _g1->in_cset_state(obj);
4221   if (state.is_in_cset()) {
4222     oop forwardee;
4223     markOop m = obj->mark();
4224     if (m->is_marked()) {
       // A marked mark word here encodes a forwarding pointer: another
       // worker (or an earlier visit) already copied this object.
4225       forwardee = (oop) m->decode_pointer();
4226     } else {
4227       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
4228     }
4229     assert(forwardee != NULL, "forwardee should not be NULL");
4230     oopDesc::encode_store_heap_oop(p, forwardee);
4231     if (do_mark_object != G1MarkNone && forwardee != obj) {
4232       // If the object is self-forwarded we don't need to explicitly
4233       // mark it, the evacuation failure protocol will do so.
4234       mark_forwarded_object(obj, forwardee);
4235     }
4236 
4237     if (barrier == G1BarrierKlass) {
4238       do_klass_barrier(p, forwardee);
4239     }
4240   } else {
4241     if (state.is_humongous()) {
4242       _g1->set_humongous_is_live(obj);
4243     }
4244     // The object is not in collection set. If we're a root scanning
4245     // closure during an initial mark pause then attempt to mark the object.
4246     if (do_mark_object == G1MarkFromRoot) {
4247       mark_object(obj);
4248     }
4249   }
4250 
4251   if (barrier == G1BarrierEvac) {
4252     _par_scan_state->update_rs(_from, p, _worker_id);
4253   }
4254 }
4255 
4256 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4257 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4258 
4259 class G1ParEvacuateFollowersClosure : public VoidClosure {
4260 protected:
4261   G1CollectedHeap*              _g1h;


5077     return false;
5078   }
5079 };
5080 
// Liveness predicate for the STW pause: an object is considered live
// (reachable) if it is outside the collection set, or inside it but
// already forwarded (i.e. it was copied).
5081 bool G1STWIsAliveClosure::do_object_b(oop p) {
5082   // An object is reachable if it is outside the collection set,
5083   // or is inside and copied.
5084   return !_g1->obj_in_cs(p) || p->is_forwarded();
5085 }
5086 
5087 // Non Copying Keep Alive closure
     // Fixes up references after evacuation without copying anything
     // (InCSetState version): InCSet referents must already be forwarded
     // (the reference is redirected), humongous referents are flagged
     // live, everything else is untouched. Only the oop* overload is
     // expected.
5088 class G1KeepAliveClosure: public OopClosure {
5089   G1CollectedHeap* _g1;
5090 public:
5091   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5092   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5093   void do_oop(oop* p) {
5094     oop obj = *p;
5095     assert(obj != NULL, "the caller should have filtered out NULL values");
5096 
5097     const InCSetState cset_state = _g1->in_cset_state(obj);
       // Not in the CSet and not humongous - nothing to do.
5098     if (cset_state.is_not_in_cset()) {
5099       return;
5100     }
5101     if (cset_state.is_in_cset()) {
5102       assert( obj->is_forwarded(), "invariant" );
5103       *p = obj->forwardee();
5104     } else {
5105       assert(!obj->is_forwarded(), "invariant" );
5106       assert(cset_state.is_humongous(),
5107              err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
5108       _g1->set_humongous_is_live(obj);
5109     }
5110   }
5111 };
5112 
5113 // Copying Keep Alive closure - can be called from both
5114 // serial and parallel code as long as different worker
5115 // threads utilize different G1ParScanThreadState instances
5116 // and different queues.
5117 
5118 class G1CopyingKeepAliveClosure: public OopClosure {
5119   G1CollectedHeap*         _g1h;
5120   OopClosure*              _copy_non_heap_obj_cl;
5121   G1ParScanThreadState*    _par_scan_state;
5122 
5123 public:
5124   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5125                             OopClosure* non_heap_obj_cl,
5126                             G1ParScanThreadState* pss):
5127     _g1h(g1h),


5883   bool failures() { return _failures; }
5884 
       // Verify the mark bitmaps for one region (skipping continuation
       // humongous regions); records any failure and always returns false
       // so the heap iteration continues.
5885   virtual bool doHeapRegion(HeapRegion* hr) {
5886     if (hr->is_continues_humongous()) return false;
5887 
5888     bool result = _g1h->verify_bitmaps(_caller, hr);
5889     if (!result) {
5890       _failures = true;
5891     }
5892     return false;
5893   }
5894 };
5895 
// Debug aid: when G1VerifyBitmaps is enabled, verify the mark bitmaps of
// every heap region and abort (guarantee) on any failure.
5896 void G1CollectedHeap::check_bitmaps(const char* caller) {
5897   if (!G1VerifyBitmaps) return;
5898 
5899   G1VerifyBitmapClosure cl(caller, this);
5900   heap_region_iterate(&cl);
5901   guarantee(!cl.failures(), "bitmap verification");
5902 }
5903 
// Debug-only consistency check between each region's own flags
// (in_collection_set / humongous / young / old) and the per-region
// InCSetState recorded in the _in_cset_fast_test table. Stops at the
// first inconsistency and returns true iff no inconsistency was found.
5904 bool G1CollectedHeap::check_cset_fast_test() {
5905   bool failures = false;
5906   for (uint i = 0; i < _hrm.length(); i += 1) {
5907     HeapRegion* hr = _hrm.at(i);
5908     InCSetState cset_state = (InCSetState) _in_cset_fast_test.get_by_index((uint) i);
5909     if (hr->is_humongous()) {
       // Humongous regions must never be in the CSet and their table
       // entry must not claim in-CSet either.
5910       if (hr->in_collection_set()) {
5911         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5912         failures = true;
5913         break;
5914       }
5915       if (cset_state.is_in_cset()) {
5916         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5917         failures = true;
5918         break;
5919       }
       // Continuation regions must not carry the humongous state
       // themselves in the table.
5920       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5921         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5922         failures = true;
5923         break;
5924       }
5925     } else {
5926       if (cset_state.is_humongous()) {
5927         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5928         failures = true;
5929         break;
5930       }
       // Region flag and table entry must agree on CSet membership...
5931       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5932         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5933                                hr->in_collection_set(), cset_state.value(), i);
5934         failures = true;
5935         break;
5936       }
       // ... and, for CSet regions, on the young/old distinction.
5937       if (cset_state.is_in_cset()) {
5938         if (hr->is_young() != (cset_state.is_young())) {
5939           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5940                                  hr->is_young(), cset_state.value(), i);
5941           failures = true;
5942           break;
5943         }
5944         if (hr->is_old() != (cset_state.is_old())) {
5945           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5946                                  hr->is_old(), cset_state.value(), i);
5947           failures = true;
5948           break;
5949         }
5950       }
5951     }
5952   }
5953   return !failures;
5954 }
5955 #endif // PRODUCT
5956 
5957 void G1CollectedHeap::cleanUpCardTable() {
5958   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5959   double start = os::elapsedTime();
5960 
5961   {
5962     // Iterate over the dirty cards region list.
5963     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5964 
5965     set_par_threads();
5966     workers()->run_task(&cleanup_task);
5967     set_par_threads(0);
5968 #ifndef PRODUCT
5969     if (G1VerifyCTCleanup || VerifyAfterGC) {
5970       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5971       heap_region_iterate(&cleanup_verifier);
5972     }
5973 #endif
5974   }


6503 
// Propagate the work gang's current active worker count to the heap's
// parallel-thread count without changing the gang itself. A zero count
// indicates a missing prior setup; product builds recover by falling
// back to ParallelGCThreads.
6504 void G1CollectedHeap::set_par_threads() {
6505   // Don't change the number of workers.  Use the value previously set
6506   // in the workgroup.
6507   uint n_workers = workers()->active_workers();
6508   assert(UseDynamicNumberOfGCThreads ||
6509            n_workers == workers()->total_workers(),
6510       "Otherwise should be using the total number of workers");
6511   if (n_workers == 0) {
6512     assert(false, "Should have been set in prior evacuation pause.");
6513     n_workers = ParallelGCThreads;
6514     workers()->set_active_workers(n_workers);
6515   }
6516   set_par_threads(n_workers);
6517 }
6518 
6519 // Methods for the GC alloc regions
6520 
// Allocate a new GC alloc region for the given destination state
// (survivor if dest is young, old otherwise) provided the policy's
// per-destination region limit has not been reached. Caller must hold
// FreeList_lock. Returns NULL if the limit was hit or no region could be
// obtained.
6521 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6522                                                  uint count,
6523                                                  InCSetState dest) {
6524   assert(FreeList_lock->owned_by_self(), "pre-condition");
6525 
6526   if (count < g1_policy()->max_regions(dest)) {
6527     const bool is_survivor = (dest.is_young());
6528     HeapRegion* new_alloc_region = new_region(word_size,
6529                                               !is_survivor,
6530                                               true /* do_expand */);
6531     if (new_alloc_region != NULL) {
6532       // We really only need to do this for old regions given that we
6533       // should never scan survivors. But it doesn't hurt to do it
6534       // for survivors too.
6535       new_alloc_region->record_timestamp();
6536       if (is_survivor) {
6537         new_alloc_region->set_survivor();
6538         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6539         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6540       } else {
6541         new_alloc_region->set_old();
6542         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6543         check_bitmaps("Old Region Allocation", new_alloc_region);
6544       }
6545       bool during_im = g1_policy()->during_initial_mark_pause();
6546       new_alloc_region->note_start_of_copying(during_im);
6547       return new_alloc_region;


6548     }
6549   }
6550   return NULL;
6551 }
6552 
// Retire a filled GC alloc region: note the end of copying, account the
// bytes copied, and hand the region to the survivor young list (young
// dest) or the old region set (otherwise).
6553 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6554                                              size_t allocated_bytes,
6555                                              InCSetState dest) {
6556   bool during_im = g1_policy()->during_initial_mark_pause();
6557   alloc_region->note_end_of_copying(during_im);
6558   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6559   if (dest.is_young()) {
6560     young_list()->add_survivor_region(alloc_region);
6561   } else {
6562     _old_set.add(alloc_region);
6563   }
6564   _hr_printer.retire(alloc_region);
6565 }
6566 
6567 // Heap region set verification
6568 
6569 class VerifyRegionListsClosure : public HeapRegionClosure {
6570 private:
6571   HeapRegionSet*   _old_set;
6572   HeapRegionSet*   _humongous_set;
6573   HeapRegionManager*   _hrm;
6574 
6575 public:
6576   HeapRegionSetCount _old_count;
6577   HeapRegionSetCount _humongous_count;
6578   HeapRegionSetCount _free_count;
6579 


< prev index next >