        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

// Pushes object fields that still need to be scavenged onto the
// promotion manager's depth-first work stack.
class PSPushContentsClosure: public BasicOopIterateClosure {
  PSPromotionManager* _pm;
 public:
  PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {}

  template <typename T> void do_oop_nv(T* p) {
    if (PSScavenge::should_scavenge(p)) {
      _pm->claim_or_forward_depth(p);
    }
  }

  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

  // Don't use the oop verification code in the oop_oop_iterate framework.
  debug_only(virtual bool should_verify_oops() { return false; })
};

//
// This closure specialization will override the one that is defined in
// instanceRefKlass.inline.cpp. It swaps the order of oop_oop_iterate and
// oop_oop_iterate_ref_processing. Unfortunately, G1 and Parallel behave
// significantly better (especially in the Derby benchmark) with the opposite
// order of these function calls.
//
template <>
inline void InstanceRefKlass::oop_oop_iterate_reverse<oop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) {
  oop_oop_iterate_ref_processing<oop>(obj, closure);
  InstanceKlass::oop_oop_iterate_reverse<oop>(obj, closure);
}

template <>
inline void InstanceRefKlass::oop_oop_iterate_reverse<narrowOop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) {
  oop_oop_iterate_ref_processing<narrowOop>(obj, closure);
  InstanceKlass::oop_oop_iterate_reverse<narrowOop>(obj, closure);
}