#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

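// Object arrays longer than ParGCArrayScanChunk elements are scanned in
// chunks rather than all at once, so a single huge array cannot stall one
// worker. The new_obj != old_obj test checks that the array was actually
// copied; the stale from-space copy is then free to carry the scan state
// (see scan_partial_array_and_push_remainder below).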
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

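// The length field of the dead from-space copy is reused as the scan
// cursor: it holds the index of the first element not yet scanned, while
// the to-space copy keeps the real length. Each call processes roughly
// ParGCArrayScanChunk elements; if enough remain, the cursor is advanced
// and the from-space copy is pushed back onto the queue for another round.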
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto the queue.
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // The test above combines the last partial chunk with a full chunk.
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push the remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore the length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

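// Move a bounded batch of entries from this thread's private overflow
// stack back onto its work queue: at most a quarter of the queue's free
// slots, and never more than ParGCDesiredObjsFromOverflowList, so the
// queue keeps room for newly discovered objects and other threads still
// find work to steal.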
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

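// With ParGCUseLocalOverflow each thread spills into its own overflow
// stack, so the global overflow list must stay empty; the asserts here
// and in take_from_overflow_stack() check that the two mechanisms are
// never mixed.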
void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
#endif

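// The keep-alive closures below are applied to referents during reference
// processing. After delegating to the scan closure, each one tests whether
// the slot p itself lies in the heap (rather than in a root); if so, the
// rem set is updated, since p may now point at a young-gen object.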
// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

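// The serial variant below appears to be the fallback used when reference
// processing runs on a single thread (cf. set_single_threaded_mode later
// in this file); it differs from the parallel one only in the scan
// closure it delegates to.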
// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

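// _boundary is the end of the young generation, so the test below selects
// exactly those references that may point at not-yet-copied from-space
// objects; anything at or above the boundary is already tenured and needs
// no forwarding. The _gc_barrier branch (elided here) updates the rem set.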
template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};

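// The proxy tasks adapt reference-processing work units to the
// AbstractGangTask interface: execute() hands a proxy to the work gang,
// and every gang worker calls back into task.work(worker_id), each with
// its own worker id and, for the process task, its own scan thread state.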
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single-threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

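    // Tail of the survivor-copy slow path (preceding context elided): the
    // object is being promoted into the old generation, typically because
    // to-space allocation failed or its age crossed the tenuring threshold;
    // if even promotion fails, it is forwarded to itself and the failure is
    // recorded so that forwarding pointers can be unwound after the pause.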
    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // Promotion failed, forward to self.
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation.
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           is_in_reserved(new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
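    // A NULL result from forward_to_atomic() means this thread won the race
    // and installed new_obj as the forwarding pointer; a non-NULL result is
    // the forwardee some other thread installed first (handled in the
    // elided code below).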