33 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
34 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
35 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
36 #include "gc_implementation/g1/g1EvacFailure.hpp"
37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
38 #include "gc_implementation/g1/g1Log.hpp"
39 #include "gc_implementation/g1/g1MarkSweep.hpp"
40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
42 #include "gc_implementation/g1/g1YCTypes.hpp"
43 #include "gc_implementation/g1/heapRegion.inline.hpp"
44 #include "gc_implementation/g1/heapRegionRemSet.hpp"
45 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
46 #include "gc_implementation/g1/vm_operations_g1.hpp"
47 #include "gc_implementation/shared/gcHeapSummary.hpp"
48 #include "gc_implementation/shared/gcTimer.hpp"
49 #include "gc_implementation/shared/gcTrace.hpp"
50 #include "gc_implementation/shared/gcTraceTime.hpp"
51 #include "gc_implementation/shared/isGCActiveMark.hpp"
52 #include "memory/gcLocker.inline.hpp"
53 #include "memory/genOopClosures.inline.hpp"
54 #include "memory/generationSpec.hpp"
55 #include "memory/referenceProcessor.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "oops/oop.pcgc.inline.hpp"
58 #include "runtime/vmThread.hpp"
59 #include "utilities/ticks.hpp"
60
// Threshold (in HeapWords) at or above which an allocation is treated as
// humongous. Defined to 0 here; presumably assigned during heap
// initialization — the assignment is not visible in this chunk.
61 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
62
63 // turn it on so that the contents of the young list (scan-only /
64 // to-be-collected) are printed at "strategic" points before / during
65 // / after the collection --- this is useful for debugging
66 #define YOUNG_LIST_VERBOSE 0
67 // CURRENT STATUS
68 // This file is under construction. Search for "FIXME".
69
70 // INVARIANTS/NOTES
71 //
72 // All allocation activity covered by the G1CollectedHeap interface is
73 // serialized by acquiring the HeapLock. This happens in mem_allocate
74 // and allocate_new_tlab, which are the "entry" points to the
3060 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3061 switch (vo) {
3062 case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3063 case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3064 case VerifyOption_G1UseMarkWord: return obj->is_gc_marked();
3065 default: ShouldNotReachHere();
3066 }
3067 return false; // keep some compilers happy
3068 }
3069
3070 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3071 switch (vo) {
3072 case VerifyOption_G1UsePrevMarking: return "PTAMS";
3073 case VerifyOption_G1UseNextMarking: return "NTAMS";
3074 case VerifyOption_G1UseMarkWord: return "NONE";
3075 default: ShouldNotReachHere();
3076 }
3077 return NULL; // keep some compilers happy
3078 }
3079
3080 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3081 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3082 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3083 // we can change this closure to extend the simpler OopClosure.
// Heap-verification closure applied to root oops: flags (and logs) any
// root that points to an object considered dead under the chosen
// verification option. Failures are accumulated, not fatal, so all bad
// roots are reported in one pass.
3084 class VerifyRootsClosure: public OopsInGenClosure {
3085 private:
3086 G1CollectedHeap* _g1h;
3087 VerifyOption _vo;
3088 bool _failures;
3089 public:
3090 // _vo == UsePrevMarking -> use "prev" marking information,
3091 // _vo == UseNextMarking -> use "next" marking information,
3092 // _vo == UseMarkWord -> use mark word from object header.
3093 VerifyRootsClosure(VerifyOption vo) :
3094 _g1h(G1CollectedHeap::heap()),
3095 _vo(vo),
3096 _failures(false) { }
3097
// Returns true if any visited root pointed at a dead object.
3098 bool failures() { return _failures; }
3099
// Checks one root location p; works for both narrow and full oops.
3100 template <class T> void do_oop_nv(T* p) {
3101 T heap_oop = oopDesc::load_heap_oop(p);
3102 if (!oopDesc::is_null(heap_oop)) {
3103 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3104 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3105 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3106 "points to dead obj "PTR_FORMAT, p, (void*) obj);
// When verifying via mark words, also dump the raw mark word to
// aid diagnosing why the object was considered dead.
3107 if (_vo == VerifyOption_G1UseMarkWord) {
3108 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3109 }
3110 obj->print_on(gclog_or_tty);
3111 _failures = true;
3112 }
3113 }
3114 }
3115
3116 void do_oop(oop* p) { do_oop_nv(p); }
3117 void do_oop(narrowOop* p) { do_oop_nv(p); }
3118 };
3119
3120 class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
3121 G1CollectedHeap* _g1h;
3122 OopClosure* _root_cl;
3123 nmethod* _nm;
3124 VerifyOption _vo;
3125 bool _failures;
3126
3127 template <class T> void do_oop_work(T* p) {
3128 // First verify that this root is live
3129 _root_cl->do_oop(p);
3130
3131 if (!G1VerifyHeapRegionCodeRoots) {
3132 // We're not verifying the code roots attached to heap region.
3133 return;
3134 }
3135
3136 // Don't check the code roots during marking verification in a full GC
3137 if (_vo == VerifyOption_G1UseMarkWord) {
3138 return;
3139 }
3140
4634 do {
4635 // Drain the overflow stack first, so other threads can steal.
4636 while (refs()->pop_overflow(ref)) {
4637 deal_with_reference(ref);
4638 }
4639
4640 while (refs()->pop_local(ref)) {
4641 deal_with_reference(ref);
4642 }
4643 } while (!refs()->is_empty());
4644 }
4645
// Caches per-collection state (rem set, concurrent mark, worker id,
// initial-mark / marking flags) so the per-oop closures avoid repeated
// lookups through the heap. NOTE: initializer order must follow the
// member declaration order in the class.
4646 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4647 G1ParScanThreadState* par_scan_state) :
4648 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4649 _par_scan_state(par_scan_state),
4650 _worker_id(par_scan_state->queue_num()),
4651 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4652 _mark_in_progress(_g1->mark_in_progress()) { }
4653
// Grays a non-moving object (one outside the collection set) so that
// concurrent marking will trace it. Called from do_oop_work for root
// scanning when do_mark_object is true.
4654 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4655 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4656 #ifdef ASSERT
4657 HeapRegion* hr = _g1->heap_region_containing(obj);
4658 assert(hr != NULL, "sanity");
4659 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4660 #endif // ASSERT
4661
4662 // We know that the object is not moving so it's safe to read its size.
4663 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4664 }
4665
// Grays the to-space copy of an evacuated object for concurrent marking.
// The size is deliberately read from the stable from-space image, not the
// possibly still-being-copied to-space image (see comment below).
4666 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4667 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4668 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4669 #ifdef ASSERT
4670 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4671 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4672 assert(from_obj != to_obj, "should not be self-forwarded");
4673
4674 HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4675 assert(from_hr != NULL, "sanity");
4676 assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4677
4678 HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4679 assert(to_hr != NULL, "sanity");
4680 assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4681 #endif // ASSERT
4682
4683 // The object might be in the process of being copied by another
4684 // worker so we cannot trust that its to-space image is
4685 // well-formed. So we have to read its size from its from-space
4686 // image which we know should not be changing.
4687 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4688 }
4689
4690 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4691 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4692 ::copy_to_survivor_space(oop old) {
4693 size_t word_sz = old->size();
4694 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4695 // +1 to make the -1 indexes valid...
4696 int young_index = from_region->young_index_in_cset()+1;
4697 assert( (from_region->is_young() && young_index > 0) ||
4698 (!from_region->is_young() && young_index == 0), "invariant" );
4699 G1CollectorPolicy* g1p = _g1->g1_policy();
4700 markOop m = old->mark();
4701 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4702 : m->age();
4703 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4704 word_sz);
4705 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4706 #ifndef PRODUCT
4707 // Should this evacuation fail?
4708 if (_g1->evacuation_should_fail()) {
4709 if (obj_ptr != NULL) {
4710 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4711 obj_ptr = NULL;
4767 } else {
4768 // No point in using the slower heap_region_containing() method,
4769 // given that we know obj is in the heap.
4770 _scanner.set_region(_g1->heap_region_containing_raw(obj));
4771 obj->oop_iterate_backwards(&_scanner);
4772 }
4773 } else {
4774 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4775 obj = forward_ptr;
4776 }
4777 return obj;
4778 }
4779
4780 template <class T>
4781 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4782 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4783 _scanned_klass->record_modified_oops();
4784 }
4785 }
4786
// Core per-reference step of evacuation: if *p points into the collection
// set, ensure the object has been copied (or reuse the existing forwardee),
// update *p to the new location, optionally gray the copy for concurrent
// marking, and apply whichever barrier this closure instantiation selects.
// All barrier tests are on compile-time template parameters, so dead
// branches are expected to be eliminated per instantiation.
4787 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4788 template <class T>
4789 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4790 ::do_oop_work(T* p) {
4791 oop obj = oopDesc::load_decode_heap_oop(p);
// RS-scanning instantiations are only handed non-NULL references.
4792 assert(barrier != G1BarrierRS || obj != NULL,
4793 "Precondition: G1BarrierRS implies obj is non-NULL");
4794
4795 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4796
4797 // here the null check is implicit in the cset_fast_test() test
4798 if (_g1->in_cset_fast_test(obj)) {
4799 oop forwardee;
4800 if (obj->is_forwarded()) {
4801 forwardee = obj->forwardee();
4802 } else {
4803 forwardee = copy_to_survivor_space(obj);
4804 }
4805 assert(forwardee != NULL, "forwardee should not be NULL");
// Redirect the reference to the object's new location.
4806 oopDesc::encode_store_heap_oop(p, forwardee);
4807 if (do_mark_object && forwardee != obj) {
4808 // If the object is self-forwarded we don't need to explicitly
4809 // mark it, the evacuation failure protocol will do so.
4810 mark_forwarded_object(obj, forwardee);
4811 }
4812
4813 // When scanning the RS, we only care about objs in CS.
4814 if (barrier == G1BarrierRS) {
4815 _par_scan_state->update_rs(_from, p, _worker_id);
4816 } else if (barrier == G1BarrierKlass) {
4817 do_klass_barrier(p, forwardee);
4818 }
4819 } else {
4820 // The object is not in collection set. If we're a root scanning
4821 // closure during an initial mark pause (i.e. do_mark_object will
4822 // be true) then attempt to mark the object.
4823 if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4824 mark_object(obj);
4825 }
4826 }
4827
// Evacuation barrier updates the rem set regardless of CSet membership.
4828 if (barrier == G1BarrierEvac && obj != NULL) {
4829 _par_scan_state->update_rs(_from, p, _worker_id);
4830 }
4831
4832 if (do_gen_barrier && obj != NULL) {
4833 par_do_barrier(p);
4834 }
4835 }
4836
// Explicit instantiations of the evac-barrier flavor for both full and
// compressed oop references.
4837 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4838 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4839
4840 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4841 assert(has_partial_array_mask(p), "invariant");
4842 oop from_obj = clear_partial_array_mask(p);
4843
4844 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4845 assert(from_obj->is_objArray(), "must be obj array");
4846 objArrayOop from_obj_array = objArrayOop(from_obj);
4847 // The from-space object contains the real length.
4848 int length = from_obj_array->length();
4849
4850 assert(from_obj->is_forwarded(), "must be forwarded");
4851 oop to_obj = from_obj->forwardee();
4852 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4853 objArrayOop to_obj_array = objArrayOop(to_obj);
4854 // We keep track of the next start index in the length field of the
4855 // to-space object.
4856 int next_index = to_obj_array->length();
4857 assert(0 <= next_index && next_index < length,
4858 err_msg("invariant, next index: %d, length: %d", next_index, length));
|
33 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
34 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
35 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
36 #include "gc_implementation/g1/g1EvacFailure.hpp"
37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
38 #include "gc_implementation/g1/g1Log.hpp"
39 #include "gc_implementation/g1/g1MarkSweep.hpp"
40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
42 #include "gc_implementation/g1/g1YCTypes.hpp"
43 #include "gc_implementation/g1/heapRegion.inline.hpp"
44 #include "gc_implementation/g1/heapRegionRemSet.hpp"
45 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
46 #include "gc_implementation/g1/vm_operations_g1.hpp"
47 #include "gc_implementation/shared/gcHeapSummary.hpp"
48 #include "gc_implementation/shared/gcTimer.hpp"
49 #include "gc_implementation/shared/gcTrace.hpp"
50 #include "gc_implementation/shared/gcTraceTime.hpp"
51 #include "gc_implementation/shared/isGCActiveMark.hpp"
52 #include "memory/gcLocker.inline.hpp"
53 #include "memory/generationSpec.hpp"
54 #include "memory/iterator.hpp"
55 #include "memory/referenceProcessor.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "oops/oop.pcgc.inline.hpp"
58 #include "runtime/vmThread.hpp"
59 #include "utilities/ticks.hpp"
60
// Threshold (in HeapWords) at or above which an allocation is treated as
// humongous. Defined to 0 here; presumably assigned during heap
// initialization — the assignment is not visible in this chunk.
61 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
62
63 // turn it on so that the contents of the young list (scan-only /
64 // to-be-collected) are printed at "strategic" points before / during
65 // / after the collection --- this is useful for debugging
66 #define YOUNG_LIST_VERBOSE 0
67 // CURRENT STATUS
68 // This file is under construction. Search for "FIXME".
69
70 // INVARIANTS/NOTES
71 //
72 // All allocation activity covered by the G1CollectedHeap interface is
73 // serialized by acquiring the HeapLock. This happens in mem_allocate
74 // and allocate_new_tlab, which are the "entry" points to the
3060 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3061 switch (vo) {
3062 case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3063 case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3064 case VerifyOption_G1UseMarkWord: return obj->is_gc_marked();
3065 default: ShouldNotReachHere();
3066 }
3067 return false; // keep some compilers happy
3068 }
3069
3070 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3071 switch (vo) {
3072 case VerifyOption_G1UsePrevMarking: return "PTAMS";
3073 case VerifyOption_G1UseNextMarking: return "NTAMS";
3074 case VerifyOption_G1UseMarkWord: return "NONE";
3075 default: ShouldNotReachHere();
3076 }
3077 return NULL; // keep some compilers happy
3078 }
3079
// Heap-verification closure applied to root oops: flags (and logs) any
// root that points to an object considered dead under the chosen
// verification option. Failures are accumulated, not fatal, so all bad
// roots are reported in one pass.
3080 class VerifyRootsClosure: public OopClosure {
3081 private:
3082 G1CollectedHeap* _g1h;
3083 VerifyOption _vo;
3084 bool _failures;
3085 public:
3086 // _vo == UsePrevMarking -> use "prev" marking information,
3087 // _vo == UseNextMarking -> use "next" marking information,
3088 // _vo == UseMarkWord -> use mark word from object header.
3089 VerifyRootsClosure(VerifyOption vo) :
3090 _g1h(G1CollectedHeap::heap()),
3091 _vo(vo),
3092 _failures(false) { }
3093
// Returns true if any visited root pointed at a dead object.
3094 bool failures() { return _failures; }
3095
// Checks one root location p; works for both narrow and full oops.
3096 template <class T> void do_oop_nv(T* p) {
3097 T heap_oop = oopDesc::load_heap_oop(p);
3098 if (!oopDesc::is_null(heap_oop)) {
3099 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3100 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3101 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3102 "points to dead obj "PTR_FORMAT, p, (void*) obj);
// When verifying via mark words, also dump the raw mark word to
// aid diagnosing why the object was considered dead.
3103 if (_vo == VerifyOption_G1UseMarkWord) {
3104 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3105 }
3106 obj->print_on(gclog_or_tty);
3107 _failures = true;
3108 }
3109 }
3110 }
3111
3112 void do_oop(oop* p) { do_oop_nv(p); }
3113 void do_oop(narrowOop* p) { do_oop_nv(p); }
3114 };
3115
3116 class G1VerifyCodeRootOopClosure: public OopClosure {
3117 G1CollectedHeap* _g1h;
3118 OopClosure* _root_cl;
3119 nmethod* _nm;
3120 VerifyOption _vo;
3121 bool _failures;
3122
3123 template <class T> void do_oop_work(T* p) {
3124 // First verify that this root is live
3125 _root_cl->do_oop(p);
3126
3127 if (!G1VerifyHeapRegionCodeRoots) {
3128 // We're not verifying the code roots attached to heap region.
3129 return;
3130 }
3131
3132 // Don't check the code roots during marking verification in a full GC
3133 if (_vo == VerifyOption_G1UseMarkWord) {
3134 return;
3135 }
3136
4630 do {
4631 // Drain the overflow stack first, so other threads can steal.
4632 while (refs()->pop_overflow(ref)) {
4633 deal_with_reference(ref);
4634 }
4635
4636 while (refs()->pop_local(ref)) {
4637 deal_with_reference(ref);
4638 }
4639 } while (!refs()->is_empty());
4640 }
4641
// Caches per-collection state (rem set, concurrent mark, worker id,
// initial-mark / marking flags) so the per-oop closures avoid repeated
// lookups through the heap. NOTE: initializer order must follow the
// member declaration order in the class.
4642 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4643 G1ParScanThreadState* par_scan_state) :
4644 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4645 _par_scan_state(par_scan_state),
4646 _worker_id(par_scan_state->queue_num()),
4647 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4648 _mark_in_progress(_g1->mark_in_progress()) { }
4649
// Grays a non-moving object (one outside the collection set) so that
// concurrent marking will trace it. Called from do_oop_work for root
// scanning when do_mark_object is true.
4650 template <G1Barrier barrier, bool do_mark_object>
4651 void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
4652 #ifdef ASSERT
4653 HeapRegion* hr = _g1->heap_region_containing(obj);
4654 assert(hr != NULL, "sanity");
4655 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4656 #endif // ASSERT
4657
4658 // We know that the object is not moving so it's safe to read its size.
4659 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4660 }
4661
// Grays the to-space copy of an evacuated object for concurrent marking.
// The size is deliberately read from the stable from-space image, not the
// possibly still-being-copied to-space image (see comment below).
4662 template <G1Barrier barrier, bool do_mark_object>
4663 void G1ParCopyClosure<barrier, do_mark_object>
4664 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4665 #ifdef ASSERT
4666 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4667 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4668 assert(from_obj != to_obj, "should not be self-forwarded");
4669
4670 HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4671 assert(from_hr != NULL, "sanity");
4672 assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4673
4674 HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4675 assert(to_hr != NULL, "sanity");
4676 assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4677 #endif // ASSERT
4678
4679 // The object might be in the process of being copied by another
4680 // worker so we cannot trust that its to-space image is
4681 // well-formed. So we have to read its size from its from-space
4682 // image which we know should not be changing.
4683 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4684 }
4685
4686 template <G1Barrier barrier, bool do_mark_object>
4687 oop G1ParCopyClosure<barrier, do_mark_object>
4688 ::copy_to_survivor_space(oop old) {
4689 size_t word_sz = old->size();
4690 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4691 // +1 to make the -1 indexes valid...
4692 int young_index = from_region->young_index_in_cset()+1;
4693 assert( (from_region->is_young() && young_index > 0) ||
4694 (!from_region->is_young() && young_index == 0), "invariant" );
4695 G1CollectorPolicy* g1p = _g1->g1_policy();
4696 markOop m = old->mark();
4697 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4698 : m->age();
4699 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4700 word_sz);
4701 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4702 #ifndef PRODUCT
4703 // Should this evacuation fail?
4704 if (_g1->evacuation_should_fail()) {
4705 if (obj_ptr != NULL) {
4706 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4707 obj_ptr = NULL;
4763 } else {
4764 // No point in using the slower heap_region_containing() method,
4765 // given that we know obj is in the heap.
4766 _scanner.set_region(_g1->heap_region_containing_raw(obj));
4767 obj->oop_iterate_backwards(&_scanner);
4768 }
4769 } else {
4770 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4771 obj = forward_ptr;
4772 }
4773 return obj;
4774 }
4775
4776 template <class T>
4777 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4778 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4779 _scanned_klass->record_modified_oops();
4780 }
4781 }
4782
// Core per-reference step of evacuation: if *p points into the collection
// set, ensure the object has been copied (or reuse the existing forwardee),
// update *p to the new location, optionally gray the copy for concurrent
// marking, and apply whichever barrier this closure instantiation selects.
// All barrier tests are on compile-time template parameters, so dead
// branches are expected to be eliminated per instantiation.
4783 template <G1Barrier barrier, bool do_mark_object>
4784 template <class T>
4785 void G1ParCopyClosure<barrier, do_mark_object>
4786 ::do_oop_work(T* p) {
4787 oop obj = oopDesc::load_decode_heap_oop(p);
4788
4789 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4790
4791 // here the null check is implicit in the cset_fast_test() test
4792 if (_g1->in_cset_fast_test(obj)) {
4793 oop forwardee;
4794 if (obj->is_forwarded()) {
4795 forwardee = obj->forwardee();
4796 } else {
4797 forwardee = copy_to_survivor_space(obj);
4798 }
4799 assert(forwardee != NULL, "forwardee should not be NULL");
// Redirect the reference to the object's new location.
4800 oopDesc::encode_store_heap_oop(p, forwardee);
4801 if (do_mark_object && forwardee != obj) {
4802 // If the object is self-forwarded we don't need to explicitly
4803 // mark it, the evacuation failure protocol will do so.
4804 mark_forwarded_object(obj, forwardee);
4805 }
4806
4807 if (barrier == G1BarrierKlass) {
4808 do_klass_barrier(p, forwardee);
4809 }
4810 } else {
4811 // The object is not in collection set. If we're a root scanning
4812 // closure during an initial mark pause (i.e. do_mark_object will
4813 // be true) then attempt to mark the object.
4814 if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4815 mark_object(obj);
4816 }
4817 }
4818
// Evacuation barrier updates the rem set regardless of CSet membership.
4819 if (barrier == G1BarrierEvac && obj != NULL) {
4820 _par_scan_state->update_rs(_from, p, _worker_id);
4821 }
4822 }
4823
// Explicit instantiations of the evac-barrier flavor for both full and
// compressed oop references.
4824 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4825 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4826
4827 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4828 assert(has_partial_array_mask(p), "invariant");
4829 oop from_obj = clear_partial_array_mask(p);
4830
4831 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4832 assert(from_obj->is_objArray(), "must be obj array");
4833 objArrayOop from_obj_array = objArrayOop(from_obj);
4834 // The from-space object contains the real length.
4835 int length = from_obj_array->length();
4836
4837 assert(from_obj->is_forwarded(), "must be forwarded");
4838 oop to_obj = from_obj->forwardee();
4839 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4840 objArrayOop to_obj_array = objArrayOop(to_obj);
4841 // We keep track of the next start index in the length field of the
4842 // to-space object.
4843 int next_index = to_obj_array->length();
4844 assert(0 <= next_index && next_index < length,
4845 err_msg("invariant, next index: %d, length: %d", next_index, length));
|