// NOTE(review): extraction residue — this span is NOT compilable as-is.
// It contains TWO complete versions of HotSpot's
// gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp,
// concatenated and separated by a literal '|' character, with the file's
// original line numbers (27, 28, ...) fused into the text and all newlines
// collapsed onto two physical lines. Restore from version control rather
// than hand-editing this blob.
//
// First copy (before the '|'): the pre-rename version, defining inline
// members of CMSOopClosure and CMSOopsInGenClosure:
//   - Par_MarkRefsIntoAndScanClosure::trim_queue(uint max): pops grey oops
//     off _work_queue until its size is <= max, asserting each popped oop
//     is a valid, marked oop, and iterates its fields with
//     _par_pushAndMarkClosure to mark/push referents in the CMS span.
//   - do_klass / do_klass_nv: forward a Klass* to do_class_loader_data via
//     its ClassLoaderData.
//   - do_class_loader_data: claims the ClassLoaderData (claim = true) and
//     walks its oops with the embedded _klass_closure.
//
// Second copy (after the '|'): the post-rename version of the same file,
// where the duplicated CMS closures were renamed to
// MetadataAwareOopsInGenClosure (the CMSOopClosure variants dropped);
// behavior of the remaining functions is token-identical to the first copy.
//
// Original (byte-preserved, except one comment typo fixed:
// "CMSoopsInGenClosure" -> "CMSOopsInGenClosure") follows:
27 28 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp" 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" 30 #include "oops/oop.inline.hpp" 31 32 // Trim our work_queue so its length is below max at return 33 inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) { 34 while (_work_queue->size() > max) { 35 oop newOop; 36 if (_work_queue->pop_local(newOop)) { 37 assert(newOop->is_oop(), "Expected an oop"); 38 assert(_bit_map->isMarked((HeapWord*)newOop), 39 "only grey objects on this stack"); 40 // iterate over the oops in this oop, marking and pushing 41 // the ones in CMS heap (i.e. in _span). 42 newOop->oop_iterate(&_par_pushAndMarkClosure); 43 } 44 } 45 } 46 47 // CMSOopClosure and CMSOopsInGenClosure are duplicated, 48 // until we get rid of OopsInGenClosure. 49 50 inline void CMSOopClosure::do_klass(Klass* k) { do_klass_nv(k); } 51 inline void CMSOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } 52 53 inline void CMSOopClosure::do_klass_nv(Klass* k) { 54 ClassLoaderData* cld = k->class_loader_data(); 55 do_class_loader_data(cld); 56 } 57 inline void CMSOopsInGenClosure::do_klass_nv(Klass* k) { 58 ClassLoaderData* cld = k->class_loader_data(); 59 do_class_loader_data(cld); 60 } 61 62 inline void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) { 63 assert(_klass_closure._oop_closure == this, "Must be"); 64 65 bool claim = true; // Must claim the class loader data before processing. 66 cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); 67 } 68 inline void CMSOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { 69 assert(_klass_closure._oop_closure == this, "Must be"); 70 71 bool claim = true; // Must claim the class loader data before processing. 
// (continuation of CMSOopsInGenClosure::do_class_loader_data, then the
//  '|'-separated second copy of the file)
72 cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); 73 } 74 75 76 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP | 27 28 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp" 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" 30 #include "oops/oop.inline.hpp" 31 32 // Trim our work_queue so its length is below max at return 33 inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) { 34 while (_work_queue->size() > max) { 35 oop newOop; 36 if (_work_queue->pop_local(newOop)) { 37 assert(newOop->is_oop(), "Expected an oop"); 38 assert(_bit_map->isMarked((HeapWord*)newOop), 39 "only grey objects on this stack"); 40 // iterate over the oops in this oop, marking and pushing 41 // the ones in CMS heap (i.e. in _span). 42 newOop->oop_iterate(&_par_pushAndMarkClosure); 43 } 44 } 45 } 46 47 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated, 48 // until we get rid of OopsInGenClosure. 49 50 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) { 51 ClassLoaderData* cld = k->class_loader_data(); 52 do_class_loader_data(cld); 53 } 54 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } 55 56 inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { 57 assert(_klass_closure._oop_closure == this, "Must be"); 58 59 bool claim = true; // Must claim the class loader data before processing. 60 cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); 61 } 62 63 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP |