/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////

// Forward declarations only: the closures in this header merely hold
// pointers (or embedded references) to these CMS collector types, so
// their full definitions are not needed here.
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class ParMarkFromRootsClosure;

// Decode the oop and call do_oop on it.
// This macro is expanded in a protected section of each closure below; it
// declares the do_oop(oop) entry point together with the templated
// do_oop_work(T*) helper that each closure defines out of line for both
// oop* and narrowOop*.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p);

// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure.
// It would be good to get rid of them completely.

// An OopsInGenClosure that always claims to process metadata: do_metadata()
// unconditionally returns true, and the virtual do_klass/do_cld entry points
// forward to their non-virtual (_nv) counterparts (do_klass_nv is defined
// out of line).
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
 public:
  virtual bool do_metadata() { return do_metadata_nv(); }
  inline bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  void do_cld_nv(ClassLoaderData* cld);
};

// Closure holding the span of the current collection and the marking
// bit map; judging by the name it records (marks) references into _span.
// The do_oop bodies are defined out of line — confirm details there.
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;    // region whose incoming references are of interest
  CMSBitMap*      _bitMap;  // marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Parallel (MT) counterpart of MarkRefsIntoClosure; structurally identical,
// with its own out-of-line do_oop implementation.
class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;  // bit map being built for verification
  CMSBitMap*      _cms_bm;           // the collector's own marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
// Marks an object and pushes it for further scanning; the mark stack and
// mod union table fields suggest newly-marked objects are recorded either
// on _mark_stack or via _mod_union_table — see the out-of-line
// do_oop_work for the actual policy.
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;                    // region being collected
  CMSBitMap*    _bit_map;                 // marking bit map
  CMSBitMap*    _mod_union_table;         // mod union table (also a bit map)
  CMSMarkStack* _mark_stack;              // stack of objects pending scan
  bool          _concurrent_precleaning;  // true while used for concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceDiscoverer* rd,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// In the parallel case, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class ParPushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;        // region being collected
  CMSBitMap*    _bit_map;     // shared marking bit map (see comment above)
  OopTaskQueue* _work_queue;  // per-worker queue of objects pending scan
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushAndMarkClosure(CMSCollector* collector,
                        MemRegion span,
                        ReferenceDiscoverer* rd,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
// Combines marking of references with scanning of the marked objects,
// delegating the per-reference work to the embedded PushAndMarkClosure.
// Supports cooperative yielding (do_yield_check/do_yield_work) and draining
// of the collector's overflow list (take_from_overflow_list).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;               // region being collected
  CMSBitMap*         _bit_map;            // marking bit map
  CMSMarkStack*      _mark_stack;         // stack of objects pending scan
  PushAndMarkClosure _pushAndMarkClosure; // does the per-reference mark-and-push work
  CMSCollector*      _collector;
  Mutex*             _freelistLock;       // set via set_freelistLock(); presumably
                                          // released/reacquired around yields — confirm in .cpp
  bool               _yield;              // whether this closure may yield
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceDiscoverer* rd,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion             _span;                  // region being collected
  CMSBitMap*            _bit_map;               // shared marking bit map
  OopTaskQueue*         _work_queue;            // per-worker queue of grey objects
  const uint            _low_water_mark;        // queue threshold; presumably used by
                                                // trim_queue — confirm in .cpp
  ParPushAndMarkClosure _parPushAndMarkClosure; // does the per-reference mark-and-push work
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceDiscoverer* rd,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Drain _work_queue down to the given size (defined out of line).
  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;          // region being collected
  CMSBitMap*    _bitMap;        // marking bit map
  CMSMarkStack* _markStack;     // stack of grey objects
  HeapWord* const _finger;      // marking finger at the time this closure was made
  MarkFromRootsClosure* const
                _parent;        // enclosing closure (see comment above)
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure ParMarkFromRootsClosure.
class ParPushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*                  _collector;
  MemRegion                      _whole_span;      // full span of the collection
  MemRegion                      _span;            // local chunk
  CMSBitMap*                     _bit_map;         // shared marking bit map
  OopTaskQueue*                  _work_queue;      // per-worker queue of grey objects
  CMSMarkStack*                  _overflow_stack;  // shared overflow stack (ctor arg "mark_stack")
  HeapWord* const                _finger;          // this worker's local finger
  HeapWord* volatile* const      _global_finger_addr; // address of the shared global finger;
                                                      // volatile — updated by multiple workers
  ParMarkFromRootsClosure* const _parent;          // enclosing closure (see comment above)
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushOrMarkClosure(CMSCollector* cms_collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* mark_stack,
                       HeapWord* finger,
                       HeapWord* volatile* global_finger_addr,
                       ParMarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  const MemRegion _span;        // region being collected
  CMSMarkStack*   _mark_stack;  // stack of objects pending scan
  CMSBitMap*      _bit_map;     // marking bit map
  bool            _concurrent_precleaning; // true while used for concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// Marks an object and pushes it on a (shared) work queue; embedded as the
// _mark_and_push member of CMSParKeepAliveClosure below, for use during
// parallel reference processing.
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;        // region being collected
  OopTaskQueue* _work_queue;  // per-worker queue of objects pending scan
  CMSBitMap*    _bit_map;     // shared marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual fast-path variants (defined inline elsewhere).
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion     _span;        // region being collected
  OopTaskQueue* _work_queue;  // per-worker queue of objects pending scan
  CMSBitMap*    _bit_map;     // shared marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push;   // does the per-reference mark-and-push work
  const uint    _low_water_mark;  // queue threshold; presumably used by
                                  // trim_queue — confirm in .cpp
  // Drain _work_queue down to at most `max` entries (defined out of line).
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP