src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Use a G1BiasedArray instance for the in_cset_fast_test data instead of a manually managed array.
Reviewed-by: brutisso, mgerdin
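
The core idea behind G1BiasedArray, as a minimal sketch (illustrative names, not the actual interface in g1BiasedArray.hpp): the base pointer is biased by the heap's start address once at construction, so a lookup can index directly with a shifted heap address instead of subtracting the heap base on every access.

    #include <cstddef>
    #include <cstdint>

    template <typename T>
    class BiasedArraySketch {
      T*     _base;        // actual storage, one slot per mapping granule
      T*     _biased_base; // _base biased so that (addr >> _shift) indexes it
      size_t _length;
      int    _shift;       // log2 of the granule size, e.g. the G1 region size
    public:
      BiasedArraySketch(uintptr_t bottom, size_t length, int shift)
        : _base(new T[length]()), _length(length), _shift(shift) {
        // Bias once here; every lookup is then just a shift and an index.
        _biased_base = _base - (bottom >> shift);
      }
      ~BiasedArraySketch() { delete[] _base; }
      size_t length() const { return _length; }
      T& at(uintptr_t addr) { return _biased_base[addr >> _shift]; }
    };

With this layout the in_cset_fast_test query for any heap address is a single shift and load, which is the bookkeeping the manually managed array had to reimplement by hand.
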
rev 6326 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: With -XX:+ParallelRefProcEnabled, G1 does not retire allocation buffers after reference processing work. This leads to wrongly calculated PLAB sizes, because the amount of wasted space is not updated correctly.
Reviewed-by: brutisso
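
In sketch form (hypothetical structs, not the actual ParGCAllocBuffer interface), the issue is that adaptive PLAB sizing reads allocation and waste totals that are only flushed when a buffer is retired, so a buffer left un-retired across reference processing keeps its unused tail out of the accounting:

    #include <cstddef>

    struct PlabStatsSketch {
      size_t allocated;  // words handed out, flushed at retirement
      size_t wasted;     // words left unused, flushed at retirement
      PlabStatsSketch() : allocated(0), wasted(0) {}
    };

    struct PlabSketch {
      size_t word_sz;    // capacity of this buffer in words
      size_t used;       // words handed out from it so far
      bool   retired;
      explicit PlabSketch(size_t sz) : word_sz(sz), used(0), retired(false) {}
      void retire(PlabStatsSketch& stats) {
        if (retired) return;
        stats.allocated += used;
        stats.wasted    += word_sz - used;  // never recorded if retire() is skipped
        retired = true;
      }
    };

If the sizing policy computes the next desired PLAB size from these totals before every buffer has been retired, the waste term is too small and the resulting PLABs are mis-sized.
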
rev 6327 : 8019342: G1: High "Other" time most likely due to card redirtying
Summary: Parallelize card redirtying to decrease the time it takes.
Reviewed-by: brutisso
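
As an illustrative sketch (not HotSpot's actual work-distribution code): the recorded dirty-card buffers can be split into chunks that GC workers claim via an atomic counter, so redirtying proceeds on all workers instead of on a single thread:

    #include <atomic>
    #include <cstddef>

    typedef unsigned char CardValue;
    const CardValue dirty_card = 0;  // G1 uses 0 as the dirty-card value

    // Called concurrently by each GC worker; 'claim' hands out chunk starts.
    void redirty_logged_cards(CardValue** cards, size_t n,
                              std::atomic<size_t>& claim, size_t chunk) {
      size_t start;
      while ((start = claim.fetch_add(chunk)) < n) {
        size_t end = (start + chunk < n) ? start + chunk : n;
        for (size_t i = start; i < end; i++) {
          *cards[i] = dirty_card;  // mark the recorded card dirty again
        }
      }
    }

Each chunk is disjoint, so no synchronization is needed beyond the claim counter, and the "Other" time previously spent redirtying serially shrinks with the number of workers.
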
rev 6328 : 8040002: Clean up code and code duplication in re-dirtying cards for verification
Summary: The card re-dirtying code for verification and the actual re-dirtying use two different, almost completely identical card closures. The verification code also still assumes a perm gen.
Reviewed-by: brutisso, jmasa
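
A sketch of the cleanup's shape (hypothetical class names, not the actual closure hierarchy): a single card-walking closure performs the real redirtying, and the verification variant reuses that walk instead of duplicating it, with no perm-gen assumption:

    #include <cstddef>

    typedef unsigned char CardValue;
    const CardValue dirty_card = 0;

    class RedirtyCardsSketch {
    protected:
      size_t _num_processed;
    public:
      RedirtyCardsSketch() : _num_processed(0) {}
      virtual ~RedirtyCardsSketch() {}
      virtual void do_card(CardValue* card) {
        *card = dirty_card;      // actually re-dirty the card
        _num_processed++;
      }
      size_t num_processed() const { return _num_processed; }
    };

    class VerifyRedirtiedSketch : public RedirtyCardsSketch {
    public:
      virtual void do_card(CardValue* card) {
        // Verification only observes; it shares the walk and the
        // bookkeeping with the redirtying closure above.
        if (*card == dirty_card) _num_processed++;
      }
    };
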
rev 6334 : 8035400: Move G1ParScanThreadState into its own files
Summary: Extract the G1ParScanThreadState class from G1CollectedHeap.cpp/.hpp into its own files.
Reviewed-by: brutisso, mgerdin


  25 #if !defined(__clang_major__) && defined(__GNUC__)
  26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
  27 #endif
  28 
  29 #include "precompiled.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/icBuffer.hpp"
  32 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  33 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  34 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  35 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  36 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  37 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  38 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  39 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  40 #include "gc_implementation/g1/g1EvacFailure.hpp"
  41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  42 #include "gc_implementation/g1/g1Log.hpp"
  43 #include "gc_implementation/g1/g1MarkSweep.hpp"
  44 #include "gc_implementation/g1/g1OopClosures.inline.hpp"

  45 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  46 #include "gc_implementation/g1/g1StringDedup.hpp"
  47 #include "gc_implementation/g1/g1YCTypes.hpp"
  48 #include "gc_implementation/g1/heapRegion.inline.hpp"
  49 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  50 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  51 #include "gc_implementation/g1/vm_operations_g1.hpp"
  52 #include "gc_implementation/shared/gcHeapSummary.hpp"
  53 #include "gc_implementation/shared/gcTimer.hpp"
  54 #include "gc_implementation/shared/gcTrace.hpp"
  55 #include "gc_implementation/shared/gcTraceTime.hpp"
  56 #include "gc_implementation/shared/isGCActiveMark.hpp"
  57 #include "memory/gcLocker.inline.hpp"
  58 #include "memory/generationSpec.hpp"
  59 #include "memory/iterator.hpp"
  60 #include "memory/referenceProcessor.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "oops/oop.pcgc.inline.hpp"
  63 #include "runtime/prefetch.inline.hpp"
  64 #include "runtime/orderAccess.inline.hpp"
  65 #include "runtime/vmThread.hpp"
  66 #include "utilities/ticks.hpp"
  67 
  68 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  69 
  70 // turn it on so that the contents of the young list (scan-only /
  71 // to-be-collected) are printed at "strategic" points before / during
  72 // / after the collection --- this is useful for debugging
  73 #define YOUNG_LIST_VERBOSE 0
  74 // CURRENT STATUS
  75 // This file is under construction.  Search for "FIXME".
  76 
  77 // INVARIANTS/NOTES
  78 //
  79 // All allocation activity covered by the G1CollectedHeap interface is
  80 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  81 // and allocate_new_tlab, which are the "entry" points to the
  82 // allocation code from the rest of the JVM.  (Note that this does not
  83 // apply to TLAB allocation, which is not part of this interface: it
  84 // is done by clients of this interface.)
  85 
  86 // Notes on implementation of parallelism in different tasks.


4551   } else {
4552     assert(purpose == GCAllocForTenured, "sanity");
4553     HeapWord* result = old_attempt_allocation(word_size);
4554     if (result != NULL) {
4555       return result;
4556     } else {
4557       // Let's try to allocate in the survivors in case we can fit the
4558       // object there.
4559       return survivor_attempt_allocation(word_size);
4560     }
4561   }
4562 
4563   ShouldNotReachHere();
4564   // Trying to keep some compilers happy.
4565   return NULL;
4566 }
4567 
4568 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4569   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
4570 
4571 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4572   : _g1h(g1h),
4573     _refs(g1h->task_queue(queue_num)),
4574     _dcq(&g1h->dirty_card_queue_set()),
4575     _ct_bs(g1h->g1_barrier_set()),
4576     _g1_rem(g1h->g1_rem_set()),
4577     _hash_seed(17), _queue_num(queue_num),
4578     _term_attempts(0),
4579     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4580     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4581     _age_table(false), _scanner(g1h, this, rp),
4582     _strong_roots_time(0), _term_time(0),
4583     _alloc_buffer_waste(0), _undo_waste(0) {
4584   // we allocate G1YoungSurvRateNumRegions plus one entries, since
4585   // we "sacrifice" entry 0 to keep track of surviving bytes for
4586   // non-young regions (where the age is -1)
4587   // We also add a few elements at the beginning and at the end in
4588   // an attempt to eliminate cache contention
4589   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4590   uint array_length = PADDING_ELEM_NUM +
4591                       real_length +
4592                       PADDING_ELEM_NUM;
4593   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4594   if (_surviving_young_words_base == NULL)
4595     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4596                           "Not enough space for young surv histo.");
4597   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4598   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4599 
4600   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4601   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4602 
4603   _start = os::elapsedTime();
4604 }
4605 
4606 void
4607 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4608 {
4609   st->print_raw_cr("GC Termination Stats");
4610   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4611                    " ------waste (KiB)------");
4612   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4613                    "  total   alloc    undo");
4614   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4615                    " ------- ------- -------");
4616 }
4617 
4618 void
4619 G1ParScanThreadState::print_termination_stats(int i,
4620                                               outputStream* const st) const
4621 {
4622   const double elapsed_ms = elapsed_time() * 1000.0;
4623   const double s_roots_ms = strong_roots_time() * 1000.0;
4624   const double term_ms    = term_time() * 1000.0;
4625   st->print_cr("%3d %9.2f %9.2f %6.2f "
4626                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4627                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4628                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4629                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4630                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4631                alloc_buffer_waste() * HeapWordSize / K,
4632                undo_waste() * HeapWordSize / K);
4633 }
4634 
4635 #ifdef ASSERT
4636 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4637   assert(ref != NULL, "invariant");
4638   assert(UseCompressedOops, "sanity");
4639   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4640   oop p = oopDesc::load_decode_heap_oop(ref);
4641   assert(_g1h->is_in_g1_reserved(p),
4642          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4643   return true;
4644 }
4645 
4646 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4647   assert(ref != NULL, "invariant");
4648   if (has_partial_array_mask(ref)) {
4649     // Must be in the collection set--it's already been copied.
4650     oop p = clear_partial_array_mask(ref);
4651     assert(_g1h->obj_in_cs(p),
4652            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4653   } else {
4654     oop p = oopDesc::load_decode_heap_oop(ref);
4655     assert(_g1h->is_in_g1_reserved(p),
4656            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4657   }
4658   return true;
4659 }
4660 
4661 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4662   if (ref.is_narrow()) {
4663     return verify_ref((narrowOop*) ref);
4664   } else {
4665     return verify_ref((oop*) ref);
4666   }
4667 }
4668 #endif // ASSERT
4669 
4670 void G1ParScanThreadState::trim_queue() {
4671   assert(_evac_failure_cl != NULL, "not set");
4672 
4673   StarTask ref;
4674   do {
4675     // Drain the overflow stack first, so other threads can steal.
4676     while (refs()->pop_overflow(ref)) {
4677       deal_with_reference(ref);
4678     }
4679 
4680     while (refs()->pop_local(ref)) {
4681       deal_with_reference(ref);
4682     }
4683   } while (!refs()->is_empty());
4684 }
4685 
4686 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4687                                      G1ParScanThreadState* par_scan_state) :
4688   _g1(g1), _par_scan_state(par_scan_state),
4689   _worker_id(par_scan_state->queue_num()) { }
4690 
4691 void G1ParCopyHelper::mark_object(oop obj) {
4692 #ifdef ASSERT
4693   HeapRegion* hr = _g1->heap_region_containing(obj);
4694   assert(hr != NULL, "sanity");
4695   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4696 #endif // ASSERT
4697 
4698   // We know that the object is not moving so it's safe to read its size.
4699   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4700 }
4701 
4702 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4703 #ifdef ASSERT
4704   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4705   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4706   assert(from_obj != to_obj, "should not be self-forwarded");
4707 
4708   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4709   assert(from_hr != NULL, "sanity");
4710   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4711 
4712   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4713   assert(to_hr != NULL, "sanity");
4714   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4715 #endif // ASSERT
4716 
4717   // The object might be in the process of being copied by another
4718   // worker so we cannot trust that its to-space image is
4719   // well-formed. So we have to read its size from its from-space
4720   // image which we know should not be changing.
4721   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4722 }
4723 
4724 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
4725   size_t word_sz = old->size();
4726   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4727   // +1 to make the -1 indexes valid...
4728   int       young_index = from_region->young_index_in_cset()+1;
4729   assert( (from_region->is_young() && young_index >  0) ||
4730          (!from_region->is_young() && young_index == 0), "invariant" );
4731   G1CollectorPolicy* g1p = _g1h->g1_policy();
4732   markOop m = old->mark();
4733   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4734                                            : m->age();
4735   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4736                                                              word_sz);
4737   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4738 #ifndef PRODUCT
4739   // Should this evacuation fail?
4740   if (_g1h->evacuation_should_fail()) {
4741     if (obj_ptr != NULL) {
4742       undo_allocation(alloc_purpose, obj_ptr, word_sz);
4743       obj_ptr = NULL;
4744     }
4745   }
4746 #endif // !PRODUCT
4747 
4748   if (obj_ptr == NULL) {
4749     // This will either forward-to-self, or detect that someone else has
4750     // installed a forwarding pointer.
4751     return _g1h->handle_evacuation_failure_par(this, old);
4752   }
4753 
4754   oop obj = oop(obj_ptr);
4755 
4756   // We're going to allocate linearly, so might as well prefetch ahead.
4757   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4758 
4759   oop forward_ptr = old->forward_to_atomic(obj);
4760   if (forward_ptr == NULL) {
4761     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4762 
4763     // alloc_purpose is just a hint to allocate() above, recheck the type of region
4764     // we actually allocated from and update alloc_purpose accordingly
4765     HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
4766     alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
4767 
4768     if (g1p->track_object_age(alloc_purpose)) {
4769       // We could simply do obj->incr_age(). However, this causes a
4770       // performance issue. obj->incr_age() will first check whether
4771       // the object has a displaced mark by checking its mark word;
4772       // getting the mark word from the new location of the object
4773       // stalls. So, given that we already have the mark word and we
4774       // are about to install it anyway, it's better to increase the
4775       // age on the mark word, when the object does not have a
4776       // displaced mark word. We're not expecting many objects to have
4777       // a displaced marked word, so that case is not optimized
4778       // further (it could be...) and we simply call obj->incr_age().
4779 
4780       if (m->has_displaced_mark_helper()) {
4781         // in this case, we have to install the mark word first,
4782         // otherwise obj looks to be forwarded (the old mark word,
4783         // which contains the forward pointer, was copied)
4784         obj->set_mark(m);
4785         obj->incr_age();
4786       } else {
4787         m = m->incr_age();
4788         obj->set_mark(m);
4789       }
4790       age_table()->add(obj, word_sz);
4791     } else {
4792       obj->set_mark(m);
4793     }
4794 
4795     if (G1StringDedup::is_enabled()) {
4796       G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
4797                                              to_region->is_young(),
4798                                              queue_num(),
4799                                              obj);
4800     }
4801 
4802     size_t* surv_young_words = surviving_young_words();
4803     surv_young_words[young_index] += word_sz;
4804 
4805     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4806       // We keep track of the next start index in the length field of
4807       // the to-space object. The actual length can be found in the
4808       // length field of the from-space object.
4809       arrayOop(obj)->set_length(0);
4810       oop* old_p = set_partial_array_mask(old);
4811       push_on_queue(old_p);
4812     } else {
4813       // No point in using the slower heap_region_containing() method,
4814       // given that we know obj is in the heap.
4815       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4816       obj->oop_iterate_backwards(&_scanner);
4817     }
4818   } else {
4819     undo_allocation(alloc_purpose, obj_ptr, word_sz);
4820     obj = forward_ptr;
4821   }
4822   return obj;
4823 }
4824 
4825 template <class T>
4826 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4827   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4828     _scanned_klass->record_modified_oops();
4829   }
4830 }
4831 
4832 template <G1Barrier barrier, bool do_mark_object>
4833 template <class T>
4834 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4835   T heap_oop = oopDesc::load_heap_oop(p);
4836 
4837   if (oopDesc::is_null(heap_oop)) {
4838     return;
4839   }
4840 
4841   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4842 
4843   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4844 




  25 #if !defined(__clang_major__) && defined(__GNUC__)
  26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
  27 #endif
  28 
  29 #include "precompiled.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/icBuffer.hpp"
  32 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  33 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  34 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  35 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  36 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  37 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  38 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  39 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  40 #include "gc_implementation/g1/g1EvacFailure.hpp"
  41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  42 #include "gc_implementation/g1/g1Log.hpp"
  43 #include "gc_implementation/g1/g1MarkSweep.hpp"
  44 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  45 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  46 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  47 #include "gc_implementation/g1/g1StringDedup.hpp"
  48 #include "gc_implementation/g1/g1YCTypes.hpp"
  49 #include "gc_implementation/g1/heapRegion.inline.hpp"
  50 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  51 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  52 #include "gc_implementation/g1/vm_operations_g1.hpp"
  53 #include "gc_implementation/shared/gcHeapSummary.hpp"
  54 #include "gc_implementation/shared/gcTimer.hpp"
  55 #include "gc_implementation/shared/gcTrace.hpp"
  56 #include "gc_implementation/shared/gcTraceTime.hpp"
  57 #include "gc_implementation/shared/isGCActiveMark.hpp"
  58 #include "memory/gcLocker.inline.hpp"
  59 #include "memory/generationSpec.hpp"
  60 #include "memory/iterator.hpp"
  61 #include "memory/referenceProcessor.hpp"
  62 #include "oops/oop.inline.hpp"
  63 #include "oops/oop.pcgc.inline.hpp"

  64 #include "runtime/orderAccess.inline.hpp"
  65 #include "runtime/vmThread.hpp"

  66 
  67 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  68 
  69 // turn it on so that the contents of the young list (scan-only /
  70 // to-be-collected) are printed at "strategic" points before / during
  71 // / after the collection --- this is useful for debugging
  72 #define YOUNG_LIST_VERBOSE 0
  73 // CURRENT STATUS
  74 // This file is under construction.  Search for "FIXME".
  75 
  76 // INVARIANTS/NOTES
  77 //
  78 // All allocation activity covered by the G1CollectedHeap interface is
  79 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  80 // and allocate_new_tlab, which are the "entry" points to the
  81 // allocation code from the rest of the JVM.  (Note that this does not
  82 // apply to TLAB allocation, which is not part of this interface: it
  83 // is done by clients of this interface.)
  84 
  85 // Notes on implementation of parallelism in different tasks.


4550   } else {
4551     assert(purpose == GCAllocForTenured, "sanity");
4552     HeapWord* result = old_attempt_allocation(word_size);
4553     if (result != NULL) {
4554       return result;
4555     } else {
4556       // Let's try to allocate in the survivors in case we can fit the
4557       // object there.
4558       return survivor_attempt_allocation(word_size);
4559     }
4560   }
4561 
4562   ShouldNotReachHere();
4563   // Trying to keep some compilers happy.
4564   return NULL;
4565 }
4566 
4567 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4568   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
4569 
4570 void G1ParCopyHelper::mark_object(oop obj) {
4571 #ifdef ASSERT
4572   HeapRegion* hr = _g1->heap_region_containing(obj);
4573   assert(hr != NULL, "sanity");
4574   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4575 #endif // ASSERT
4576 
4577   // We know that the object is not moving so it's safe to read its size.
4578   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4579 }
4580 
4581 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4582 #ifdef ASSERT
4583   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4584   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4585   assert(from_obj != to_obj, "should not be self-forwarded");
4586 
4587   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4588   assert(from_hr != NULL, "sanity");
4589   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4590 
4591   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4592   assert(to_hr != NULL, "sanity");
4593   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4594 #endif // ASSERT
4595 
4596   // The object might be in the process of being copied by another
4597   // worker so we cannot trust that its to-space image is
4598   // well-formed. So we have to read its size from its from-space
4599   // image which we know should not be changing.
4600   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4601 }
4602 
4603 template <class T>
4604 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4605   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4606     _scanned_klass->record_modified_oops();
4607   }
4608 }
4609 
4610 template <G1Barrier barrier, bool do_mark_object>
4611 template <class T>
4612 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4613   T heap_oop = oopDesc::load_heap_oop(p);
4614 
4615   if (oopDesc::is_null(heap_oop)) {
4616     return;
4617   }
4618 
4619   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4620 
4621   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4622