1 /* 2 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP 27 28 #include "memory/genOopClosures.hpp" 29 30 ///////////////////////////////////////////////////////////////// 31 // Closures used by ConcurrentMarkSweepGeneration's collector 32 ///////////////////////////////////////////////////////////////// 33 class ConcurrentMarkSweepGeneration; 34 class CMSBitMap; 35 class CMSMarkStack; 36 class CMSCollector; 37 class MarkFromRootsClosure; 38 class Par_MarkFromRootsClosure; 39 40 // Decode the oop and call do_oop on it. 
// Expanded into each closure body below: declares the out-of-line do_oop(oop)
// plus a non-virtual, templatized do_oop_work(T*) that loads a (possibly
// narrow) heap oop, skips NULL, decodes it, and forwards the decoded oop to
// do_oop(obj). Expanding per-class (rather than inheriting) lets the load,
// null-check and decode be inlined at each use site.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p) { \
    T heap_oop = oopDesc::load_heap_oop(p); \
    if (!oopDesc::is_null(heap_oop)) { \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
      do_oop(obj); \
    } \
  }

// Applies the given oop closure to all oops in all klasses visited.
class CMKlassClosure : public KlassClosure {
  // Only the two CMS base closures below may call initialize().
  friend class CMSOopClosure;
  friend class CMSOopsInGenClosure;

  // The oop closure applied to each klass's oops; set exactly once.
  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }
 public:
  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }

  void do_klass(Klass* k);
};

// The base class for all CMS marking closures.
// It's used to proxy through the metadata to the oops defined in them.
class CMSOopClosure: public ExtendedOopClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopClosure() : ExtendedOopClosure() {
    // _klass_closure needs a back-pointer to this closure, which cannot be
    // passed in our own initializer list, hence the deferred initialize().
    _klass_closure.initialize(this);
  }
  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // CMS closures always process metadata (klasses / class loader data).
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// TODO: This duplication of the CMSOopClosure class is only needed because
// some CMS OopClosures derive from OopsInGenClosure. It would be good
// to get rid of them completely.
// Mirror of CMSOopClosure for closures that must derive from
// OopsInGenClosure (see the TODO above about removing the duplication).
class CMSOopsInGenClosure: public OopsInGenClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopsInGenClosure() {
    // Back-pointer cannot be supplied in the initializer list.
    _klass_closure.initialize(this);
  }

  // Metadata is always processed, as in CMSOopClosure.
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// Presumably records, in _bitMap, references that point into _span; the
// do_oop bodies are defined out of line — confirm in the .cpp file.
class MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;    // region of interest for this closure
  CMSBitMap*      _bitMap;  // bit map updated by do_oop (not owned)
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// NOTE(review): unlike MarkRefsIntoClosure above, this derives from plain
// OopsInGenClosure (no metadata proxying) and additionally declares
// do_oop_nv/do_header — confirm the asymmetry is intentional.
class Par_MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;             // region of interest
  CMSBitMap*      _verification_bm;  // bit map being built for verification
  CMSBitMap*      _cms_bm;           // the collector's own marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  // Whether closure is being used for concurrent precleaning.
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// In the parallel case, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;       // shared among workers; see comment above
  OopTaskQueue* _work_queue;    // this worker's queue of grey objects
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;  // applied to objects drained from the stack
  CMSCollector*      _collector;
  Mutex*             _freelistLock;        // settable after construction; see set_freelistLock()
  bool               _yield;               // whether the closure may yield; see do_yield_check()
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;     // shared among workers
  OopTaskQueue*          _work_queue;  // this worker's queue of grey objects
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;   // fixed for the lifetime of this closure
  MarkFromRootsClosure* const
                  _parent;   // enclosing closure; used on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;          // the entire region being marked
  MemRegion        _span;                // local chunk
  CMSBitMap*       _bit_map;             // shared among workers
  OopTaskQueue*    _work_queue;          // this worker's queue of grey objects
  CMSMarkStack*    _overflow_stack;      // shared overflow stack
  HeapWord* const  _finger;              // this worker's finger; fixed per closure
  HeapWord** const _global_finger_addr;  // address of the shared global finger
  Par_MarkFromRootsClosure* const
                   _parent;              // enclosing closure; used on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public CMSOopClosure {
 private:
  CMSCollector*   _collector;
  const MemRegion _span;
  CMSMarkStack*   _mark_stack;
  CMSBitMap*      _bit_map;
  // Whether closure is being used for concurrent precleaning.
  bool            _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

// Inner closure used by CMSParKeepAliveClosure below (queue-based
// mark-and-push instead of a CMSMarkStack).
class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by specialized (devirtualized) oop iteration.
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public CMSOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;   // applied to entries drained from _work_queue
  const uint    _low_water_mark;  // queue-depth threshold used by trim_queue()
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP