
src/share/vm/gc/cms/cmsOopClosures.hpp

rev 9847 : 8146395: Add inline qualifier in oop.hpp and fix inlining in gc files
Summary: Fix remaining issues after 8146401

*** 40,56 ****
  class Par_MarkFromRootsClosure;

  // Decode the oop and call do_oop on it.
  #define DO_OOP_WORK_DEFN \
    void do_oop(oop obj); \
!   template <class T> inline void do_oop_work(T* p) { \
!     T heap_oop = oopDesc::load_heap_oop(p); \
!     if (!oopDesc::is_null(heap_oop)) { \
!       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
!       do_oop(obj); \
!     } \
!   }

  // TODO: This duplication of the MetadataAwareOopClosure class is only needed
  // because some CMS OopClosures derive from OopsInGenClosure. It would be
  // good to get rid of them completely.
  class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
--- 40,50 ----
  class Par_MarkFromRootsClosure;

  // Decode the oop and call do_oop on it.
  #define DO_OOP_WORK_DEFN \
    void do_oop(oop obj); \
!   template <class T> inline void do_oop_work(T* p);

  // TODO: This duplication of the MetadataAwareOopClosure class is only needed
  // because some CMS OopClosures derive from OopsInGenClosure. It would be
  // good to get rid of them completely.
  class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
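The macro body deleted above has to live somewhere: the header now keeps only the declaration, and the definition moves out of line. A minimal sketch of that split, assuming a companion cmsOopClosures.inline.hpp and a hypothetical DO_OOP_WORK_IMPL macro (both names illustrative, not shown on this page); the body is exactly the one removed from the header:

    // cmsOopClosures.inline.hpp (sketch; macro name is an assumption)
    // Defines cls::do_oop_work out of line: load the (possibly narrow)
    // heap oop, and if it is non-null, decode it and pass it to do_oop.
    #define DO_OOP_WORK_IMPL(cls)                                  \
      template <class T>                                           \
      inline void cls::do_oop_work(T* p) {                         \
        T heap_oop = oopDesc::load_heap_oop(p);                    \
        if (!oopDesc::is_null(heap_oop)) {                         \
          oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);   \
          do_oop(obj);                                             \
        }                                                          \
      }

The usual motivation for this pattern in HotSpot is that the body calls oopDesc functions defined in oop.inline.hpp; moving it out keeps that dependency away from files that include only the closure header.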
*** 129,140 ****
                       CMSBitMap* mod_union_table,
                       CMSMarkStack* mark_stack,
                       bool concurrent_precleaning);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  };

  // In the parallel case, the bit map and the
  // reference processor are currently all shared. Access to
  // these shared mutable structures must use appropriate
--- 123,134 ----
                       CMSBitMap* mod_union_table,
                       CMSMarkStack* mark_stack,
                       bool concurrent_precleaning);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);
  };

  // In the parallel case, the bit map and the
  // reference processor are currently all shared. Access to
  // these shared mutable structures must use appropriate
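The same move applies to the do_oop_nv fast paths here and in every closure below: only the declarations stay in the header, and the one-line bodies removed above would be defined next to the do_oop_work expansion. A sketch for PushAndMarkClosure under the same assumed cmsOopClosures.inline.hpp layout; the remaining closures in this file follow the identical pattern:

    // cmsOopClosures.inline.hpp (sketch, names as assumed above)
    DO_OOP_WORK_IMPL(PushAndMarkClosure)
    inline void PushAndMarkClosure::do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
    inline void PushAndMarkClosure::do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }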
*** 155,166 ****
                            ReferenceProcessor* rp,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  };

  // The non-parallel version (the parallel version appears further below).
  class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
   private:
--- 149,160 ----
                            ReferenceProcessor* rp,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);
  };

  // The non-parallel version (the parallel version appears further below).
  class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
   private:
*** 184,195 ****
                              CMSCollector* collector,
                              bool should_yield,
                              bool concurrent_precleaning);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

    void set_freelistLock(Mutex* m) {
      _freelistLock = m;
    }
--- 178,189 ----
                              CMSCollector* collector,
                              bool should_yield,
                              bool concurrent_precleaning);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);

    void set_freelistLock(Mutex* m) {
      _freelistLock = m;
    }
*** 218,229 ****
                                   ReferenceProcessor* rp,
                                   CMSBitMap* bit_map,
                                   OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

    void trim_queue(uint size);
  };

  // This closure is used during the concurrent marking phase
--- 212,223 ----
                                   ReferenceProcessor* rp,
                                   CMSBitMap* bit_map,
                                   OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);

    void trim_queue(uint size);
  };

  // This closure is used during the concurrent marking phase
*** 247,258 ****
                      CMSMarkStack* markStack,
                      HeapWord* finger,
                      MarkFromRootsClosure* parent);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

    // Deal with a stack overflow condition
    void handle_stack_overflow(HeapWord* lost);
   private:
    inline void do_yield_check();
--- 241,252 ----
                      CMSMarkStack* markStack,
                      HeapWord* finger,
                      MarkFromRootsClosure* parent);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);

    // Deal with a stack overflow condition
    void handle_stack_overflow(HeapWord* lost);
   private:
    inline void do_yield_check();
*** 285,296 ****
                          HeapWord* finger,
                          HeapWord** global_finger_addr,
                          Par_MarkFromRootsClosure* parent);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

    // Deal with a stack overflow condition
    void handle_stack_overflow(HeapWord* lost);
   private:
    inline void do_yield_check();
--- 279,290 ----
                          HeapWord* finger,
                          HeapWord** global_finger_addr,
                          Par_MarkFromRootsClosure* parent);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);

    // Deal with a stack overflow condition
    void handle_stack_overflow(HeapWord* lost);
   private:
    inline void do_yield_check();
*** 316,327 ****
                        CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                        bool cpc);
    bool concurrent_precleaning() const { return _concurrent_precleaning; }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
  };

  class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
   private:
    CMSCollector* _collector;
--- 310,321 ----
                        CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                        bool cpc);
    bool concurrent_precleaning() const { return _concurrent_precleaning; }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);
  };

  class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
   private:
    CMSCollector* _collector;
*** 334,345 ****
    CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                  MemRegion span, CMSBitMap* bit_map,
                                  OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
!   inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  };

  // A parallel (MT) version of the above, used when
  // reference processing is parallel; the only difference
  // is in the do_oop method.
--- 328,339 ----
    CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                  MemRegion span, CMSBitMap* bit_map,
                                  OopTaskQueue* work_queue);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
!   inline void do_oop_nv(oop* p);
!   inline void do_oop_nv(narrowOop* p);
  };

  // A parallel (MT) version of the above, used when
  // reference processing is parallel; the only difference
  // is in the do_oop method.