src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 5917 : 8027559: Decrease code size and templatizing in G1ParCopyClosure::do_oop_work
Summary: Move methods that are not dependent on any of G1ParCopyClosure's template parameters into G1ParCopyHelper. Further remove unused methods and members of the class hierarchy.
Reviewed-by:
rev 5918 : 8035326: Assume non-NULL references in G1CollectedHeap::in_cset_fast_test
Summary: Remove the assumption that G1CollectedHeap::in_cset_fast_test needs to check for NULL references. Most of the time this check is not required, yet the code performed it multiple times.
Reviewed-by:
rev 5919 : 8035329: Move G1ParCopyClosure::copy_to_survivor_space into G1ParScanThreadState
Summary: Move G1ParCopyClosure::copy_to_survivor_space into G1ParScanThreadState to decrease code size.


4517   } else {
4518     assert(purpose == GCAllocForTenured, "sanity");
4519     HeapWord* result = old_attempt_allocation(word_size);
4520     if (result != NULL) {
4521       return result;
4522     } else {
4523       // Let's try to allocate in the survivors in case we can fit the
4524       // object there.
4525       return survivor_attempt_allocation(word_size);
4526     }
4527   }
4528 
4529   ShouldNotReachHere();
4530   // Trying to keep some compilers happy.
4531   return NULL;
4532 }
4533 
4534 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4535   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4536 
4537 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4538   : _g1h(g1h),
4539     _refs(g1h->task_queue(queue_num)),
4540     _dcq(&g1h->dirty_card_queue_set()),
4541     _ct_bs(g1h->g1_barrier_set()),
4542     _g1_rem(g1h->g1_rem_set()),
4543     _hash_seed(17), _queue_num(queue_num),
4544     _term_attempts(0),
4545     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4546     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4547     _age_table(false),
4548     _strong_roots_time(0), _term_time(0),
4549     _alloc_buffer_waste(0), _undo_waste(0) {
4550   // We allocate G1YoungSurvRateNumRegions + 1 entries, since we
4551   // "sacrifice" entry 0 to keep track of surviving bytes for
4552   // non-young regions (where the age is -1).
4553   // We also add a few elements at the beginning and at the end in
4554   // an attempt to eliminate cache contention.
4555   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4556   uint array_length = PADDING_ELEM_NUM +
4557                       real_length +
4558                       PADDING_ELEM_NUM;
4559   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4560   if (_surviving_young_words_base == NULL)
4561     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4562                           "Not enough space for young surv histo.");
4563   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4564   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4565 
4566   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4567   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;


4672   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4673   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4674   assert(from_obj != to_obj, "should not be self-forwarded");
4675 
4676   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4677   assert(from_hr != NULL, "sanity");
4678   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4679 
4680   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4681   assert(to_hr != NULL, "sanity");
4682   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4683 #endif // ASSERT
4684 
4685   // The object might be in the process of being copied by another
4686   // worker so we cannot trust that its to-space image is
4687   // well-formed. So we have to read its size from its from-space
4688   // image which we know should not be changing.
4689   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4690 }
4691 
4692 template <G1Barrier barrier, bool do_mark_object>
4693 oop G1ParCopyClosure<barrier, do_mark_object>
4694   ::copy_to_survivor_space(oop old) {
4695   size_t word_sz = old->size();
4696   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4697   // +1 to make the -1 indexes valid...
4698   int       young_index = from_region->young_index_in_cset()+1;
4699   assert( (from_region->is_young() && young_index >  0) ||
4700          (!from_region->is_young() && young_index == 0), "invariant" );
4701   G1CollectorPolicy* g1p = _g1->g1_policy();
4702   markOop m = old->mark();
4703   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4704                                            : m->age();
4705   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4706                                                              word_sz);
4707   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4708 #ifndef PRODUCT
4709   // Should this evacuation fail?
4710   if (_g1->evacuation_should_fail()) {
4711     if (obj_ptr != NULL) {
4712       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4713       obj_ptr = NULL;
4714     }
4715   }
4716 #endif // !PRODUCT
4717 
4718   if (obj_ptr == NULL) {
4719     // This will either forward-to-self, or detect that someone else has
4720     // installed a forwarding pointer.
4721     return _g1->handle_evacuation_failure_par(_par_scan_state, old);
4722   }
4723 
4724   oop obj = oop(obj_ptr);
4725 
4726   // We're going to allocate linearly, so might as well prefetch ahead.
4727   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4728 
4729   oop forward_ptr = old->forward_to_atomic(obj);
4730   if (forward_ptr == NULL) {
4731     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4732     if (g1p->track_object_age(alloc_purpose)) {
4733       // We could simply do obj->incr_age(). However, this causes a
4734       // performance issue. obj->incr_age() will first check whether
4735       // the object has a displaced mark by checking its mark word;
4736       // getting the mark word from the new location of the object
4737       // stalls. So, given that we already have the mark word and we
4738       // are about to install it anyway, it's better to increase the
4739       // age on the mark word when the object does not have a
4740       // displaced mark word. We're not expecting many objects to have
4741       // a displaced mark word, so that case is not optimized
4742       // further (it could be...) and we simply call obj->incr_age().
4743 
4744       if (m->has_displaced_mark_helper()) {
4745         // In this case, we have to install the mark word first,
4746         // otherwise obj appears to be forwarded (the old mark word,
4747         // which contains the forward pointer, was copied).
4748         obj->set_mark(m);
4749         obj->incr_age();
4750       } else {
4751         m = m->incr_age();
4752         obj->set_mark(m);
4753       }
4754       _par_scan_state->age_table()->add(obj, word_sz);
4755     } else {
4756       obj->set_mark(m);
4757     }
4758 
4759     size_t* surv_young_words = _par_scan_state->surviving_young_words();
4760     surv_young_words[young_index] += word_sz;
4761 
4762     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4763       // We keep track of the next start index in the length field of
4764       // the to-space object. The actual length can be found in the
4765       // length field of the from-space object.
4766       arrayOop(obj)->set_length(0);
4767       oop* old_p = set_partial_array_mask(old);
4768       _par_scan_state->push_on_queue(old_p);
4769     } else {
4770       // No point in using the slower heap_region_containing() method,
4771       // given that we know obj is in the heap.
4772       _scanner.set_region(_g1->heap_region_containing_raw(obj));
4773       obj->oop_iterate_backwards(&_scanner);
4774     }
4775   } else {
4776     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4777     obj = forward_ptr;
4778   }
4779   return obj;
4780 }
4781 
4782 template <class T>
4783 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4784   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4785     _scanned_klass->record_modified_oops();
4786   }
4787 }
4788 
4789 template <G1Barrier barrier, bool do_mark_object>
4790 template <class T>
4791 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4792   T heap_oop = oopDesc::load_heap_oop(p);
4793 
4794   if (!oopDesc::is_null(heap_oop)) {
4795     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
4796 
4797     assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4798 
4799     if (_g1->in_cset_fast_test(obj)) {
4800       oop forwardee;
4801       if (obj->is_forwarded()) {
4802         forwardee = obj->forwardee();
4803       } else {
4804         forwardee = copy_to_survivor_space(obj);
4805       }
4806       assert(forwardee != NULL, "forwardee should not be NULL");
4807       oopDesc::encode_store_heap_oop(p, forwardee);
4808       if (do_mark_object && forwardee != obj) {
4809         // If the object is self-forwarded, we don't need to explicitly
4810         // mark it; the evacuation failure protocol will do so.
4811         mark_forwarded_object(obj, forwardee);
4812       }
4813 
4814       if (barrier == G1BarrierKlass) {
4815         do_klass_barrier(p, forwardee);
4816       }
4817     } else {
4818       // The object is not in the collection set. If we're a root scanning
4819       // closure during an initial mark pause (i.e. do_mark_object will
4820       // be true) then attempt to mark the object.
4821       if (do_mark_object) {
4822         assert(_g1->is_in_g1_reserved(obj), "Must reference an object within the heap");
4823         mark_object(obj);
4824       }


5006     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap,
5007     // both of which need setting by set_n_termination().
5008     _g1h->SharedHeap::set_n_termination(active_workers);
5009     _g1h->set_n_termination(active_workers);
5010     terminator()->reset_for_reuse(active_workers);
5011     _n_workers = active_workers;
5012   }
5013 
5014   void work(uint worker_id) {
5015     if (worker_id >= _n_workers) return;  // no work needed this round
5016 
5017     double start_time_ms = os::elapsedTime() * 1000.0;
5018     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
5019 
5020     {
5021       ResourceMark rm;
5022       HandleMark   hm;
5023 
5024       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
5025 
5026       G1ParScanThreadState            pss(_g1h, worker_id);
5027       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
5028       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5029       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
5030 
5031       pss.set_evac_closure(&scan_evac_cl);
5032       pss.set_evac_failure_closure(&evac_failure_cl);
5033       pss.set_partial_scan_closure(&partial_scan_cl);
5034 
5035       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
5036       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
5037 
5038       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
5039       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
5040 
5041       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
5042       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
5043       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
5044 
5045       OopClosure*                    scan_root_cl = &only_scan_root_cl;
5046       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;


5437 
5438 public:
5439   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5440                         G1CollectedHeap* g1h,
5441                         RefToScanQueueSet *task_queues,
5442                         ParallelTaskTerminator* terminator) :
5443     AbstractGangTask("Process reference objects in parallel"),
5444     _proc_task(proc_task),
5445     _g1h(g1h),
5446     _task_queues(task_queues),
5447     _terminator(terminator)
5448   {}
5449 
5450   virtual void work(uint worker_id) {
5451     // The reference processing task executed by a single worker.
5452     ResourceMark rm;
5453     HandleMark   hm;
5454 
5455     G1STWIsAliveClosure is_alive(_g1h);
5456 
5457     G1ParScanThreadState pss(_g1h, worker_id);
5458 
5459     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5460     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5461     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5462 
5463     pss.set_evac_closure(&scan_evac_cl);
5464     pss.set_evac_failure_closure(&evac_failure_cl);
5465     pss.set_partial_scan_closure(&partial_scan_cl);
5466 
5467     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5468     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5469 
5470     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5471     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5472 
5473     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5474     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5475 
5476     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5477       // We also need to mark copied objects.


5549 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5550 protected:
5551   G1CollectedHeap* _g1h;
5552   RefToScanQueueSet      *_queues;
5553   ParallelTaskTerminator _terminator;
5554   uint _n_workers;
5555 
5556 public:
5557   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5558     AbstractGangTask("ParPreserveCMReferents"),
5559     _g1h(g1h),
5560     _queues(task_queues),
5561     _terminator(workers, _queues),
5562     _n_workers(workers)
5563   { }
5564 
5565   void work(uint worker_id) {
5566     ResourceMark rm;
5567     HandleMark   hm;
5568 
5569     G1ParScanThreadState            pss(_g1h, worker_id);
5570     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5571     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5572     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5573 
5574     pss.set_evac_closure(&scan_evac_cl);
5575     pss.set_evac_failure_closure(&evac_failure_cl);
5576     pss.set_partial_scan_closure(&partial_scan_cl);
5577 
5578     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5579 
5580 
5581     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5582     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5583 
5584     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5585     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5586 
5587     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5588     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5589 


5675                                                  no_of_gc_workers,
5676                                                  _task_queues);
5677 
5678   if (G1CollectedHeap::use_parallel_gc_threads()) {
5679     workers()->run_task(&keep_cm_referents);
5680   } else {
5681     keep_cm_referents.work(0);
5682   }
5683 
5684   set_par_threads(0);
5685 
5686   // Closure to test whether a referent is alive.
5687   G1STWIsAliveClosure is_alive(this);
5688 
5689   // Even when parallel reference processing is enabled, the processing
5690   // of JNI refs is serial and performed by the current thread
5691   // rather than by a worker. The following PSS will be used for processing
5692   // JNI refs.
5693 
5694   // Use only a single queue for this PSS.
5695   G1ParScanThreadState pss(this, 0);
5696 
5697   // We do not embed a reference processor in the copying/scanning
5698   // closures while we're actually processing the discovered
5699   // reference objects.
5700   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5701   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5702   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5703 
5704   pss.set_evac_closure(&scan_evac_cl);
5705   pss.set_evac_failure_closure(&evac_failure_cl);
5706   pss.set_partial_scan_closure(&partial_scan_cl);
5707 
5708   assert(pss.refs()->is_empty(), "pre-condition");
5709 
5710   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5711   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5712 
5713   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5714   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5715 




4517   } else {
4518     assert(purpose == GCAllocForTenured, "sanity");
4519     HeapWord* result = old_attempt_allocation(word_size);
4520     if (result != NULL) {
4521       return result;
4522     } else {
4523       // Let's try to allocate in the survivors in case we can fit the
4524       // object there.
4525       return survivor_attempt_allocation(word_size);
4526     }
4527   }
4528 
4529   ShouldNotReachHere();
4530   // Trying to keep some compilers happy.
4531   return NULL;
4532 }
4533 
4534 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4535   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4536 
4537 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4538   : _g1h(g1h),
4539     _refs(g1h->task_queue(queue_num)),
4540     _dcq(&g1h->dirty_card_queue_set()),
4541     _ct_bs(g1h->g1_barrier_set()),
4542     _g1_rem(g1h->g1_rem_set()),
4543     _hash_seed(17), _queue_num(queue_num),
4544     _term_attempts(0),
4545     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4546     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4547     _age_table(false), _scanner(g1h, this, rp),
4548     _strong_roots_time(0), _term_time(0),
4549     _alloc_buffer_waste(0), _undo_waste(0) {
4550   // We allocate G1YoungSurvRateNumRegions + 1 entries, since we
4551   // "sacrifice" entry 0 to keep track of surviving bytes for
4552   // non-young regions (where the age is -1).
4553   // We also add a few elements at the beginning and at the end in
4554   // an attempt to eliminate cache contention.
4555   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4556   uint array_length = PADDING_ELEM_NUM +
4557                       real_length +
4558                       PADDING_ELEM_NUM;
4559   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4560   if (_surviving_young_words_base == NULL)
4561     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4562                           "Not enough space for young surv histo.");
4563   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4564   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4565 
4566   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4567   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;


4672   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4673   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4674   assert(from_obj != to_obj, "should not be self-forwarded");
4675 
4676   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4677   assert(from_hr != NULL, "sanity");
4678   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4679 
4680   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4681   assert(to_hr != NULL, "sanity");
4682   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4683 #endif // ASSERT
4684 
4685   // The object might be in the process of being copied by another
4686   // worker so we cannot trust that its to-space image is
4687   // well-formed. So we have to read its size from its from-space
4688   // image which we know should not be changing.
4689   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4690 }
4691 
4692 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {


4693   size_t word_sz = old->size();
4694   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4695   // +1 to make the -1 indexes valid...
4696   int       young_index = from_region->young_index_in_cset()+1;
4697   assert( (from_region->is_young() && young_index >  0) ||
4698          (!from_region->is_young() && young_index == 0), "invariant" );
4699   G1CollectorPolicy* g1p = _g1h->g1_policy();
4700   markOop m = old->mark();
4701   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4702                                            : m->age();
4703   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4704                                                              word_sz);
4705   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4706 #ifndef PRODUCT
4707   // Should this evacuation fail?
4708   if (_g1h->evacuation_should_fail()) {
4709     if (obj_ptr != NULL) {
4710       undo_allocation(alloc_purpose, obj_ptr, word_sz);
4711       obj_ptr = NULL;
4712     }
4713   }
4714 #endif // !PRODUCT
4715 
4716   if (obj_ptr == NULL) {
4717     // This will either forward-to-self, or detect that someone else has
4718     // installed a forwarding pointer.
4719     return _g1h->handle_evacuation_failure_par(this, old);
4720   }
4721 
4722   oop obj = oop(obj_ptr);
4723 
4724   // We're going to allocate linearly, so might as well prefetch ahead.
4725   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4726 
4727   oop forward_ptr = old->forward_to_atomic(obj);
4728   if (forward_ptr == NULL) {
4729     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4730     if (g1p->track_object_age(alloc_purpose)) {
4731       // We could simply do obj->incr_age(). However, this causes a
4732       // performance issue. obj->incr_age() will first check whether
4733       // the object has a displaced mark by checking its mark word;
4734       // getting the mark word from the new location of the object
4735       // stalls. So, given that we already have the mark word and we
4736       // are about to install it anyway, it's better to increase the
4737       // age on the mark word when the object does not have a
4738       // displaced mark word. We're not expecting many objects to have
4739       // a displaced mark word, so that case is not optimized
4740       // further (it could be...) and we simply call obj->incr_age().
4741 
4742       if (m->has_displaced_mark_helper()) {
4743         // In this case, we have to install the mark word first,
4744         // otherwise obj appears to be forwarded (the old mark word,
4745         // which contains the forward pointer, was copied).
4746         obj->set_mark(m);
4747         obj->incr_age();
4748       } else {
4749         m = m->incr_age();
4750         obj->set_mark(m);
4751       }
4752       age_table()->add(obj, word_sz);
4753     } else {
4754       obj->set_mark(m);
4755     }
4756 
4757     size_t* surv_young_words = surviving_young_words();
4758     surv_young_words[young_index] += word_sz;
4759 
4760     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4761       // We keep track of the next start index in the length field of
4762       // the to-space object. The actual length can be found in the
4763       // length field of the from-space object.
4764       arrayOop(obj)->set_length(0);
4765       oop* old_p = set_partial_array_mask(old);
4766       push_on_queue(old_p);
4767     } else {
4768       // No point in using the slower heap_region_containing() method,
4769       // given that we know obj is in the heap.
4770       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4771       obj->oop_iterate_backwards(&_scanner);
4772     }
4773   } else {
4774     undo_allocation(alloc_purpose, obj_ptr, word_sz);
4775     obj = forward_ptr;
4776   }
4777   return obj;
4778 }
4779 
4780 template <class T>
4781 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4782   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4783     _scanned_klass->record_modified_oops();
4784   }
4785 }
4786 
4787 template <G1Barrier barrier, bool do_mark_object>
4788 template <class T>
4789 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4790   T heap_oop = oopDesc::load_heap_oop(p);
4791 
4792   if (!oopDesc::is_null(heap_oop)) {
4793     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
4794 
4795     assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4796 
4797     if (_g1->in_cset_fast_test(obj)) {
4798       oop forwardee;
4799       if (obj->is_forwarded()) {
4800         forwardee = obj->forwardee();
4801       } else {
4802         forwardee = _par_scan_state->copy_to_survivor_space(obj);
4803       }
4804       assert(forwardee != NULL, "forwardee should not be NULL");
4805       oopDesc::encode_store_heap_oop(p, forwardee);
4806       if (do_mark_object && forwardee != obj) {
4807         // If the object is self-forwarded, we don't need to explicitly
4808         // mark it; the evacuation failure protocol will do so.
4809         mark_forwarded_object(obj, forwardee);
4810       }
4811 
4812       if (barrier == G1BarrierKlass) {
4813         do_klass_barrier(p, forwardee);
4814       }
4815     } else {
4816       // The object is not in the collection set. If we're a root scanning
4817       // closure during an initial mark pause (i.e. do_mark_object will
4818       // be true) then attempt to mark the object.
4819       if (do_mark_object) {
4820         assert(_g1->is_in_g1_reserved(obj), "Must reference an object within the heap");
4821         mark_object(obj);
4822       }


5004     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap,
5005     // both of which need setting by set_n_termination().
5006     _g1h->SharedHeap::set_n_termination(active_workers);
5007     _g1h->set_n_termination(active_workers);
5008     terminator()->reset_for_reuse(active_workers);
5009     _n_workers = active_workers;
5010   }
5011 
5012   void work(uint worker_id) {
5013     if (worker_id >= _n_workers) return;  // no work needed this round
5014 
5015     double start_time_ms = os::elapsedTime() * 1000.0;
5016     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
5017 
5018     {
5019       ResourceMark rm;
5020       HandleMark   hm;
5021 
5022       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
5023 
5024       G1ParScanThreadState            pss(_g1h, worker_id, rp);
5025       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
5026       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5027       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
5028 
5029       pss.set_evac_closure(&scan_evac_cl);
5030       pss.set_evac_failure_closure(&evac_failure_cl);
5031       pss.set_partial_scan_closure(&partial_scan_cl);
5032 
5033       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
5034       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
5035 
5036       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
5037       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
5038 
5039       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
5040       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
5041       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
5042 
5043       OopClosure*                    scan_root_cl = &only_scan_root_cl;
5044       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;


5435 
5436 public:
5437   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5438                         G1CollectedHeap* g1h,
5439                         RefToScanQueueSet *task_queues,
5440                         ParallelTaskTerminator* terminator) :
5441     AbstractGangTask("Process reference objects in parallel"),
5442     _proc_task(proc_task),
5443     _g1h(g1h),
5444     _task_queues(task_queues),
5445     _terminator(terminator)
5446   {}
5447 
5448   virtual void work(uint worker_id) {
5449     // The reference processing task executed by a single worker.
5450     ResourceMark rm;
5451     HandleMark   hm;
5452 
5453     G1STWIsAliveClosure is_alive(_g1h);
5454 
5455     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5456 
5457     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5458     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5459     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5460 
5461     pss.set_evac_closure(&scan_evac_cl);
5462     pss.set_evac_failure_closure(&evac_failure_cl);
5463     pss.set_partial_scan_closure(&partial_scan_cl);
5464 
5465     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5466     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5467 
5468     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5469     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5470 
5471     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5472     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5473 
5474     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5475       // We also need to mark copied objects.


5547 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5548 protected:
5549   G1CollectedHeap* _g1h;
5550   RefToScanQueueSet      *_queues;
5551   ParallelTaskTerminator _terminator;
5552   uint _n_workers;
5553 
5554 public:
5555   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5556     AbstractGangTask("ParPreserveCMReferents"),
5557     _g1h(g1h),
5558     _queues(task_queues),
5559     _terminator(workers, _queues),
5560     _n_workers(workers)
5561   { }
5562 
5563   void work(uint worker_id) {
5564     ResourceMark rm;
5565     HandleMark   hm;
5566 
5567     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5568     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5569     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5570     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5571 
5572     pss.set_evac_closure(&scan_evac_cl);
5573     pss.set_evac_failure_closure(&evac_failure_cl);
5574     pss.set_partial_scan_closure(&partial_scan_cl);
5575 
5576     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5577 
5578 
5579     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5580     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5581 
5582     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5583     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5584 
5585     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5586     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5587 


5673                                                  no_of_gc_workers,
5674                                                  _task_queues);
5675 
5676   if (G1CollectedHeap::use_parallel_gc_threads()) {
5677     workers()->run_task(&keep_cm_referents);
5678   } else {
5679     keep_cm_referents.work(0);
5680   }
5681 
5682   set_par_threads(0);
5683 
5684   // Closure to test whether a referent is alive.
5685   G1STWIsAliveClosure is_alive(this);
5686 
5687   // Even when parallel reference processing is enabled, the processing
5688   // of JNI refs is serial and performed by the current thread
5689   // rather than by a worker. The following PSS will be used for processing
5690   // JNI refs.
5691 
5692   // Use only a single queue for this PSS.
5693   G1ParScanThreadState            pss(this, 0, NULL);
5694 
5695   // We do not embed a reference processor in the copying/scanning
5696   // closures while we're actually processing the discovered
5697   // reference objects.
5698   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5699   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5700   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5701 
5702   pss.set_evac_closure(&scan_evac_cl);
5703   pss.set_evac_failure_closure(&evac_failure_cl);
5704   pss.set_partial_scan_closure(&partial_scan_cl);
5705 
5706   assert(pss.refs()->is_empty(), "pre-condition");
5707 
5708   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5709   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5710 
5711   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5712   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5713