1 /* 2 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP 26 #define SHARE_GC_SHARED_OOPSTORAGE_HPP 27 28 #include "memory/allocation.hpp" 29 #include "metaprogramming/conditional.hpp" 30 #include "metaprogramming/isConst.hpp" 31 #include "oops/oop.hpp" 32 #include "utilities/count_trailing_zeros.hpp" 33 #include "utilities/debug.hpp" 34 #include "utilities/globalDefinitions.hpp" 35 #include "utilities/macros.hpp" 36 37 class Mutex; 38 class outputStream; 39 40 // OopStorage supports management of off-heap references to objects allocated 41 // in the Java heap. An OopStorage object provides a set of Java object 42 // references (oop values), which clients refer to via oop* handles to the 43 // associated OopStorage entries. Clients allocate entries to create a 44 // (possibly weak) reference to a Java object, use that reference, and release 45 // the reference when no longer needed. 
46 // 47 // The garbage collector must know about all OopStorage objects and their 48 // reference strength. OopStorage provides the garbage collector with support 49 // for iteration over all the allocated entries. 50 // 51 // There are several categories of interaction with an OopStorage object. 52 // 53 // (1) allocation and release of entries, by the mutator or the VM. 54 // (2) iteration by the garbage collector, possibly concurrent with mutator. 55 // (3) iteration by other, non-GC, tools (only at safepoints). 56 // (4) cleanup of unused internal storage, possibly concurrent with mutator. 57 // 58 // A goal of OopStorage is to make these interactions thread-safe, while 59 // minimizing potential lock contention issues within and between these 60 // categories. In particular, support for concurrent iteration by the garbage 61 // collector, under certain restrictions, is required. Further, it must not 62 // block nor be blocked by other operations for long periods. 63 // 64 // Internally, OopStorage is a set of Block objects, from which entries are 65 // allocated and released. A block contains an oop[] and a bitmask indicating 66 // which entries are in use (have been allocated and not yet released). New 67 // blocks are constructed and added to the storage object when an entry 68 // allocation request is made and there are no blocks with unused entries. 69 // Blocks may be removed and deleted when empty. 70 // 71 // There are two important (and somewhat intertwined) protocols governing 72 // concurrent access to a storage object. These are the Concurrent Iteration 73 // Protocol and the Allocation Protocol. See the ParState class for a 74 // discussion of concurrent iteration and the management of thread 75 // interactions for this protocol. Similarly, see the allocate() function for 76 // a discussion of allocation. 
class OopStorage : public CHeapObj<mtGC> {
public:
  OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
  ~OopStorage();

  // These count and usage accessors are racy unless at a safepoint.

  // The number of allocated and not yet released entries.
  size_t allocation_count() const;

  // The number of blocks of entries.  Useful for sizing parallel iteration.
  size_t block_count() const;

  // The number of blocks with no allocated entries.  Useful for sizing
  // parallel iteration and scheduling block deletion.
  size_t empty_block_count() const;

  // Total number of blocks * memory allocation per block, plus
  // bookkeeping overhead, including this storage object.
  size_t total_memory_usage() const;

  enum EntryStatus {
    INVALID_ENTRY,
    UNALLOCATED_ENTRY,
    ALLOCATED_ENTRY
  };

  // Locks _allocate_mutex.
  EntryStatus allocation_status(const oop* ptr) const;

  // Allocates and returns a new entry.  Returns NULL if memory allocation
  // failed.  Locks _allocate_mutex.
  // postcondition: *result == NULL.
  oop* allocate();

  // Deallocates ptr, after setting its value to NULL.  Locks _allocate_mutex.
  // precondition: ptr is a valid allocated entry.
  // precondition: *ptr == NULL.
  void release(const oop* ptr);

  // Releases all the ptrs.  Possibly faster than individual calls to
  // release(oop*).  Best if ptrs is sorted by address.  Locks
  // _allocate_mutex.
  // precondition: All elements of ptrs are valid allocated entries.
  // precondition: *ptrs[i] == NULL, for i in [0,size).
  void release(const oop* const* ptrs, size_t size);

  // Applies f to each allocated entry's location.  f must be a function or
  // function object.  Assume p is either a const oop* or an oop*, depending
  // on whether the associated storage is const or non-const, respectively.
  // Then f(p) must be a valid expression.  The result of invoking f(p) must
  // be implicitly convertible to bool.  Iteration terminates and returns
  // false if any invocation of f returns false.  Otherwise, the result of
  // iteration is true.
  // precondition: at safepoint.
  template<typename F> bool iterate_safepoint(F f);
  template<typename F> bool iterate_safepoint(F f) const;

  // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
  // an adaptation layer allowing the use of existing is-alive closures and
  // OopClosures.  Assume p is either const oop* or oop*, depending on whether
  // the associated storage is const or non-const, respectively.  Then
  //
  // - closure->do_oop(p) must be a valid expression whose value is ignored.
  //
  // - is_alive->do_object_b(*p) must be a valid expression whose value is
  // convertible to bool.
  //
  // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
  // invoked for p.  If is_alive->do_object_b(*p) is false, then closure will
  // not be invoked on p, and *p will be set to NULL.

  template<typename Closure> void oops_do(Closure* closure);
  template<typename Closure> void oops_do(Closure* closure) const;
  template<typename Closure> void weak_oops_do(Closure* closure);

  template<typename IsAliveClosure, typename Closure>
  void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);

#if INCLUDE_ALL_GCS
  // Parallel iteration is for the exclusive use of the GC.
  // Other clients must use serial iteration.
  template<bool concurrent, bool is_const> class ParState;
#endif // INCLUDE_ALL_GCS

  // Block cleanup functions are for the exclusive use of the GC.
  // Both stop deleting if there is an in-progress concurrent iteration.
  // Concurrent deletion locks both the allocate_mutex and the active_mutex.
  // "retain" is the number of empty blocks to keep for future allocations.
  void delete_empty_blocks_safepoint(size_t retain = 1);
  void delete_empty_blocks_concurrent(size_t retain = 1);

  // Debugging and logging support.
  const char* name() const;
  void print_on(outputStream* st) const PRODUCT_RETURN;

  // Provides access to storage internals, for unit testing.
  class TestAccess;

private:
  class Block;
  class BlockList;

  // Doubly-linked list node, embedded in each Block (one per list the
  // block can be on: _active_list and _allocate_list).
  class BlockEntry VALUE_OBJ_CLASS_SPEC {
    friend class BlockList;

    // Members are mutable, and we deal exclusively with pointers to
    // const, to make const blocks easier to use; a block being const
    // doesn't prevent modifying its list state.
    mutable const Block* _prev;
    mutable const Block* _next;

    // Noncopyable.
    BlockEntry(const BlockEntry&);
    BlockEntry& operator=(const BlockEntry&);

  public:
    BlockEntry();
  };

  // Intrusive doubly-linked list of Blocks.  The list does not own its
  // blocks; _get_entry selects which embedded BlockEntry links the list.
  class BlockList VALUE_OBJ_CLASS_SPEC {
    const Block* _head;
    const Block* _tail;
    const BlockEntry& (*_get_entry)(const Block& block);

    // Noncopyable.
    BlockList(const BlockList&);
    BlockList& operator=(const BlockList&);

  public:
    BlockList(const BlockEntry& (*get_entry)(const Block& block));

    Block* head();
    const Block* chead() const;
    const Block* ctail() const;

    Block* prev(Block& block);
    Block* next(Block& block);

    const Block* prev(const Block& block) const;
    const Block* next(const Block& block) const;

    void push_front(const Block& block);
    void push_back(const Block& block);
    void unlink(const Block& block);
  };

  class Block /* No base class, to avoid messing up alignment requirements */ {
    // _data must be the first non-static data member, for alignment.
    oop _data[BitsPerWord];
    static const unsigned _data_pos = 0; // Position of _data.

    volatile uintx _allocated_bitmask; // One bit per _data element.
    const OopStorage* _owner;
    void* _memory;              // Unaligned storage containing block.
    BlockEntry _active_entry;
    BlockEntry _allocate_entry;

    Block(const OopStorage* owner, void* memory);
    ~Block();

    void check_index(unsigned index) const;
    unsigned get_index(const oop* ptr) const;

    template<typename F, typename BlockPtr>
    static bool iterate_impl(F f, BlockPtr b);

    // Noncopyable.
    Block(const Block&);
    Block& operator=(const Block&);

  public:
    static const BlockEntry& get_active_entry(const Block& block);
    static const BlockEntry& get_allocate_entry(const Block& block);

    static size_t allocation_size();
    static size_t allocation_alignment_shift();

    oop* get_pointer(unsigned index);
    const oop* get_pointer(unsigned index) const;

    uintx bitmask_for_index(unsigned index) const;
    uintx bitmask_for_entry(const oop* ptr) const;

    // Allocation bitmask accessors are racy.
    bool is_full() const;
    bool is_empty() const;
    uintx allocated_bitmask() const;
    uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);

    bool contains(const oop* ptr) const;

    // Returns NULL if ptr is not in a block or not allocated in that block.
    static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

    oop* allocate();
    static Block* new_block(const OopStorage* owner);
    static void delete_block(const Block& block);

    template<typename F> bool iterate(F f);
    template<typename F> bool iterate(F f) const;
  }; // class Block

  const char* _name;
  BlockList _active_list;
  BlockList _allocate_list;
  Block* volatile _active_head;

  Mutex* _allocate_mutex;
  Mutex* _active_mutex;

  // Counts are volatile for racy unlocked accesses.
  volatile size_t _allocation_count;
  volatile size_t _block_count;
  volatile size_t _empty_block_count;
  // mutable because this gets set even for const iteration.
  mutable bool _concurrent_iteration_active;

  Block* find_block_or_null(const oop* ptr) const;
  bool is_valid_block_locked_or_safepoint(const Block* block) const;
  EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
  void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
  void release_from_block(Block& block, uintx release_bitmask);
  void delete_empty_block(const Block& block);

  static void assert_at_safepoint() NOT_DEBUG_RETURN;

  template<typename F, typename Storage> // Storage := [const] OopStorage
  static bool iterate_impl(F f, Storage* storage);

#if INCLUDE_ALL_GCS
  // Implementation support for parallel iteration
  class BasicParState;
#endif // INCLUDE_ALL_GCS

  // Wrapper for OopClosure-style function, so it can be used with
  // iterate.  Assume p is of type oop*.  Then cl->do_oop(p) must be a
  // valid expression whose value may be ignored.
  template<typename Closure> class OopFn;
  template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);

  // Wrapper for BoolObjectClosure + iteration handler pair, so they
  // can be used with iterate.
  template<typename IsAlive, typename F> class IfAliveFn;
  template<typename IsAlive, typename F>
  static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);

  // Wrapper for iteration handler, automatically skipping NULL entries.
  template<typename F> class SkipNullFn;
  template<typename F> static SkipNullFn<F> skip_null_fn(F f);

  // Wrapper for iteration handler; ignore handler result and return true.
  template<typename F> class AlwaysTrueFn;
};

inline OopStorage::Block* OopStorage::BlockList::head() {
  return const_cast<Block*>(_head);
}

inline const OopStorage::Block* OopStorage::BlockList::chead() const {
  return _head;
}

inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
  return _tail;
}

inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
  return const_cast<Block*>(_get_entry(block)._prev);
}

inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
  return const_cast<Block*>(_get_entry(block)._next);
}

inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
  return _get_entry(block)._prev;
}

inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
  return _get_entry(block)._next;
}

template<typename Closure>
class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
public:
  explicit OopFn(Closure* cl) : _cl(cl) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    _cl->do_oop(ptr);
    return true;                // Closures never terminate iteration early.
  }

private:
  Closure* _cl;
};

template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
  return OopFn<Closure>(cl);
}

template<typename IsAlive, typename F>
class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
public:
  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}

  bool operator()(oop* ptr) const {
    bool result = true;
    oop v = *ptr;
    if (v != NULL) {
      if (_is_alive->do_object_b(v)) {
        result = _f(ptr);
      } else {
        *ptr = NULL;            // Clear dead value.
      }
    }
    return result;
  }

private:
  IsAlive* _is_alive;
  F _f;
};

template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
  return IfAliveFn<IsAlive, F>(is_alive, f);
}

template<typename F>
class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
public:
  SkipNullFn(F f) : _f(f) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    return (*ptr != NULL) ? _f(ptr) : true;
  }

private:
  F _f;
};

template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
  return SkipNullFn<F>(f);
}

template<typename F>
class OopStorage::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
  F _f;

public:
  AlwaysTrueFn(F f) : _f(f) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const { _f(ptr); return true; }
};

// Inline Block accesses for use in iteration inner loop.
439 440 inline void OopStorage::Block::check_index(unsigned index) const { 441 assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index); 442 } 443 444 inline oop* OopStorage::Block::get_pointer(unsigned index) { 445 check_index(index); 446 return &_data[index]; 447 } 448 449 inline const oop* OopStorage::Block::get_pointer(unsigned index) const { 450 check_index(index); 451 return &_data[index]; 452 } 453 454 inline uintx OopStorage::Block::allocated_bitmask() const { 455 return _allocated_bitmask; 456 } 457 458 inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const { 459 check_index(index); 460 return uintx(1) << index; 461 } 462 463 template<typename F, typename BlockPtr> // [const] Block* 464 inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) { 465 uintx bitmask = block->allocated_bitmask(); 466 while (bitmask != 0) { 467 unsigned index = count_trailing_zeros(bitmask); 468 bitmask ^= block->bitmask_for_index(index); 469 if (!f(block->get_pointer(index))) { 470 return false; 471 } 472 } 473 return true; 474 } 475 476 template<typename F> 477 inline bool OopStorage::Block::iterate(F f) { 478 return iterate_impl(f, this); 479 } 480 481 template<typename F> 482 inline bool OopStorage::Block::iterate(F f) const { 483 return iterate_impl(f, this); 484 } 485 486 ////////////////////////////////////////////////////////////////////////////// 487 // Support for serial iteration, always at a safepoint. 
488 489 template<typename F, typename Storage> // Storage := [const] OopStorage 490 inline bool OopStorage::iterate_impl(F f, Storage* storage) { 491 assert_at_safepoint(); 492 typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr; 493 for (BlockPtr block = storage->_active_head; 494 block != NULL; 495 block = storage->_active_list.next(*block)) { 496 if (!block->iterate(f)) { 497 return false; 498 } 499 } 500 return true; 501 } 502 503 template<typename F> 504 inline bool OopStorage::iterate_safepoint(F f) { 505 return iterate_impl(f, this); 506 } 507 508 template<typename F> 509 inline bool OopStorage::iterate_safepoint(F f) const { 510 return iterate_impl(f, this); 511 } 512 513 template<typename Closure> 514 inline void OopStorage::oops_do(Closure* cl) { 515 iterate_safepoint(oop_fn(cl)); 516 } 517 518 template<typename Closure> 519 inline void OopStorage::oops_do(Closure* cl) const { 520 iterate_safepoint(oop_fn(cl)); 521 } 522 523 template<typename Closure> 524 inline void OopStorage::weak_oops_do(Closure* cl) { 525 iterate_safepoint(skip_null_fn(oop_fn(cl))); 526 } 527 528 template<typename IsAliveClosure, typename Closure> 529 inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) { 530 iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl))); 531 } 532 533 #if INCLUDE_ALL_GCS 534 535 ////////////////////////////////////////////////////////////////////////////// 536 // Support for parallel and optionally concurrent state iteration. 537 // 538 // Parallel iteration is for the exclusive use of the GC. Other iteration 539 // clients must use serial iteration. 540 // 541 // Concurrent Iteration 542 // 543 // Iteration involves the _active_list, which contains all of the blocks owned 544 // by a storage object. This is a doubly-linked list, linked through 545 // dedicated fields in the blocks. 546 // 547 // At most one concurrent ParState can exist at a time for a given storage 548 // object. 
549 // 550 // A concurrent ParState sets the associated storage's 551 // _concurrent_iteration_active flag true when the state is constructed, and 552 // sets it false when the state is destroyed. These assignments are made with 553 // _active_mutex locked. Meanwhile, empty block deletion is not done while 554 // _concurrent_iteration_active is true. The flag check and the dependent 555 // removal of a block from the _active_list is performed with _active_mutex 556 // locked. This prevents concurrent iteration and empty block deletion from 557 // interfering with each other. 558 // 559 // Both allocate() and delete_empty_blocks_concurrent() lock the 560 // _allocate_mutex while performing their respective list manipulations, 561 // preventing them from interfering with each other. 562 // 563 // When allocate() creates a new block, it is added to the front of the 564 // _active_list. Then _active_head is set to the new block. When concurrent 565 // iteration is started (by a parallel worker thread calling the state's 566 // iterate() function), the current _active_head is used as the initial block 567 // for the iteration, with iteration proceeding down the list headed by that 568 // block. 569 // 570 // As a result, the list over which concurrent iteration operates is stable. 571 // However, once the iteration is started, later allocations may add blocks to 572 // the front of the list that won't be examined by the iteration. And while 573 // the list is stable, concurrent allocate() and release() operations may 574 // change the set of allocated entries in a block at any time during the 575 // iteration. 576 // 577 // As a result, a concurrent iteration handler must accept that some 578 // allocations and releases that occur after the iteration started will not be 579 // seen by the iteration. Further, some may overlap examination by the 580 // iteration.
To help with this, allocate() and release() have an invariant 581 // that an entry's value must be NULL when it is not in use. 582 // 583 // An in-progress delete_empty_blocks_concurrent() operation can contend with 584 // the start of a concurrent iteration over the _active_mutex. Since both are 585 // under GC control, that potential contention can be eliminated by never 586 // scheduling both operations to run at the same time. 587 // 588 // ParState<concurrent, is_const> 589 // concurrent must be true if iteration is concurrent with the 590 // mutator, false if iteration is at a safepoint. 591 // 592 // is_const must be true if the iteration is over a constant storage 593 // object, false if the iteration may modify the storage object. 594 // 595 // ParState([const] OopStorage* storage) 596 // Construct an object for managing an iteration over storage. For a 597 // concurrent ParState, empty block deletion for the associated storage 598 // is inhibited for the life of the ParState. There can be no more 599 // than one live concurrent ParState at a time for a given storage object. 600 // 601 // template<typename F> void iterate(F f) 602 // Repeatedly claims a block from the associated storage that has 603 // not been processed by this iteration (possibly by other threads), 604 // and applies f to each entry in the claimed block. Concurrent uses 605 // must be prepared for an entry's value to change at any time, due 606 // to mutator activity. 607 // 608 // template<typename Closure> void oops_do(Closure* cl) 609 // Wrapper around iterate, providing an adaptation layer allowing 610 // the use of OopClosures and similar objects for iteration. Assume 611 // p is of type const oop* or oop*, according to is_const. Then 612 // cl->do_oop(p) must be a valid expression whose value is ignored. 613 // Concurrent uses must be prepared for the entry's value to change 614 // at any time, due to mutator activity. 
//
// Optional operations, provided only if !concurrent && !is_const.
// These are not provided when is_const, because the storage object
// may be modified by the iteration infrastructure, even if the
// provided closure doesn't modify the storage object.  These are not
// provided when concurrent because any pre-filtering behavior by the
// iteration infrastructure is inappropriate for concurrent iteration;
// modifications of the storage by the mutator could result in the
// pre-filtering being applied (successfully or not) to objects that
// are unrelated to what the closure finds in the entry.
//
// template<typename Closure> void weak_oops_do(Closure* cl)
// template<typename IsAliveClosure, typename Closure>
// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
//   Wrappers around iterate, providing an adaptation layer allowing
//   the use of is-alive closures and OopClosures for iteration.
//   Assume p is of type oop*.  Then
//
//   - cl->do_oop(p) must be a valid expression whose value is ignored.
//
//   - is_alive->do_object_b(*p) must be a valid expression whose value
//   is convertible to bool.
//
//   If *p == NULL then neither is_alive nor cl will be invoked for p.
//   If is_alive->do_object_b(*p) is false, then cl will not be
//   invoked on p.

// Shared implementation behind all ParState instantiations: tracks the
// claim cursor so multiple GC worker threads can each claim and process
// distinct blocks.
class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
public:
  BasicParState(OopStorage* storage, bool concurrent);
  ~BasicParState();

  // Claim blocks one at a time until the list is exhausted, applying f
  // to every allocated entry of each claimed block.
  template<bool is_const, typename F> void iterate(F f) {
    // Wrap f in ATF so we can use Block::iterate.
    AlwaysTrueFn<F> atf_f(f);
    ensure_iteration_started();
    typename Conditional<is_const, const Block*, Block*>::type block;
    while ((block = claim_next_block()) != NULL) {
      block->iterate(atf_f);
    }
  }

private:
  OopStorage* _storage;
  void* volatile _next_block;   // Claim cursor, advanced atomically by workers.
  bool _concurrent;

  // Noncopyable.
  BasicParState(const BasicParState&);
  BasicParState& operator=(const BasicParState&);

  void update_iteration_state(bool value);
  void ensure_iteration_started();
  Block* claim_next_block();
};

// General case: concurrent and/or const iteration.  Only the basic
// iterate/oops_do operations are provided (see comment above for why the
// weak_oops_do pre-filtering variants are excluded here).
template<bool concurrent, bool is_const>
class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
  BasicParState _basic_state;

public:
  ParState(const OopStorage* storage) :
    // For simplicity, always recorded as non-const.
    _basic_state(const_cast<OopStorage*>(storage), concurrent)
  {}

  template<typename F>
  void iterate(F f) {
    _basic_state.template iterate<is_const>(f);
  }

  template<typename Closure>
  void oops_do(Closure* cl) {
    this->iterate(oop_fn(cl));
  }
};

// Specialization for !concurrent && !is_const: additionally provides the
// weak_oops_do operations, which may modify entries (clearing dead values).
template<>
class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
  BasicParState _basic_state;

public:
  ParState(OopStorage* storage) :
    _basic_state(storage, false)
  {}

  template<typename F>
  void iterate(F f) {
    _basic_state.template iterate<false>(f);
  }

  template<typename Closure>
  void oops_do(Closure* cl) {
    this->iterate(oop_fn(cl));
  }

  template<typename Closure>
  void weak_oops_do(Closure* cl) {
    this->iterate(skip_null_fn(oop_fn(cl)));
  }

  template<typename IsAliveClosure, typename Closure>
  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
    this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
  }
};

#endif // INCLUDE_ALL_GCS

#endif // include guard