1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_SHARED_SPACE_HPP 26 #define SHARE_VM_GC_SHARED_SPACE_HPP 27 28 #include "gc/shared/blockOffsetTable.hpp" 29 #include "gc/shared/cardTable.hpp" 30 #include "gc/shared/workgroup.hpp" 31 #include "memory/allocation.hpp" 32 #include "memory/iterator.hpp" 33 #include "memory/memRegion.hpp" 34 #include "oops/markOop.hpp" 35 #include "runtime/mutexLocker.hpp" 36 #include "utilities/align.hpp" 37 #include "utilities/macros.hpp" 38 39 // A space is an abstraction for the "storage units" backing 40 // up the generation abstraction. It includes specific 41 // implementations for keeping track of free and used space, 42 // for iterating over objects and free blocks, etc. 43 44 // Forward decls. 45 class Space; 46 class BlockOffsetArray; 47 class BlockOffsetArrayContigSpace; 48 class Generation; 49 class CompactibleSpace; 50 class BlockOffsetTable; 51 class CardTableRS; 52 class DirtyCardToOopClosure; 53 54 // A Space describes a heap area. Class Space is an abstract 55 // base class. 56 // 57 // Space supports allocation, size computation and GC support is provided. 58 // 59 // Invariant: bottom() and end() are on page_size boundaries and 60 // bottom() <= top() <= end() 61 // top() is inclusive and end() is exclusive. 62 63 class Space: public CHeapObj<mtGC> { 64 friend class VMStructs; 65 protected: 66 HeapWord* _bottom; 67 HeapWord* _end; 68 69 // Used in support of save_marks() 70 HeapWord* _saved_mark_word; 71 72 // A sequential tasks done structure. This supports 73 // parallel GC, where we have threads dynamically 74 // claiming sub-tasks from a larger parallel task. 75 SequentialSubTasksDone _par_seq_tasks; 76 77 Space(): 78 _bottom(NULL), _end(NULL) { } 79 80 public: 81 // Accessors 82 HeapWord* bottom() const { return _bottom; } 83 HeapWord* end() const { return _end; } 84 virtual void set_bottom(HeapWord* value) { _bottom = value; } 85 virtual void set_end(HeapWord* value) { _end = value; } 86 87 virtual HeapWord* saved_mark_word() const { return _saved_mark_word; } 88 89 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } 90 91 // Returns true if this object has been allocated since a 92 // generation's "save_marks" call. 93 virtual bool obj_allocated_since_save_marks(const oop obj) const { 94 return (HeapWord*)obj >= saved_mark_word(); 95 } 96 97 virtual MemRegionClosure* preconsumptionDirtyCardClosure() const { 98 return NULL; 99 } 100 101 // Returns a subregion of the space containing only the allocated objects in 102 // the space. 103 virtual MemRegion used_region() const = 0; 104 105 // Returns a region that is guaranteed to contain (at least) all objects 106 // allocated at the time of the last call to "save_marks". If the space 107 // initializes its DirtyCardToOopClosure's specifying the "contig" option 108 // (that is, if the space is contiguous), then this region must contain only 109 // such objects: the memregion will be from the bottom of the region to the 110 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of 111 // the space must distinguish between objects in the region allocated before 112 // and after the call to save marks. 113 MemRegion used_region_at_save_marks() const { 114 return MemRegion(bottom(), saved_mark_word()); 115 } 116 117 // Initialization. 118 // "initialize" should be called once on a space, before it is used for 119 // any purpose. The "mr" arguments gives the bounds of the space, and 120 // the "clear_space" argument should be true unless the memory in "mr" is 121 // known to be zeroed. 122 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); 123 124 // The "clear" method must be called on a region that may have 125 // had allocation performed in it, but is now to be considered empty. 126 virtual void clear(bool mangle_space); 127 128 // For detecting GC bugs. Should only be called at GC boundaries, since 129 // some unused space may be used as scratch space during GC's. 130 // We also call this when expanding a space to satisfy an allocation 131 // request. See bug #4668531 132 virtual void mangle_unused_area() = 0; 133 virtual void mangle_unused_area_complete() = 0; 134 135 // Testers 136 bool is_empty() const { return used() == 0; } 137 bool not_empty() const { return used() > 0; } 138 139 // Returns true iff the given the space contains the 140 // given address as part of an allocated object. For 141 // certain kinds of spaces, this might be a potentially 142 // expensive operation. To prevent performance problems 143 // on account of its inadvertent use in product jvm's, 144 // we restrict its use to assertion checks only. 145 bool is_in(const void* p) const { 146 return used_region().contains(p); 147 } 148 bool is_in(oop obj) const { 149 return is_in((void*)obj); 150 } 151 152 // Returns true iff the given reserved memory of the space contains the 153 // given address. 154 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; } 155 156 // Returns true iff the given block is not allocated. 157 virtual bool is_free_block(const HeapWord* p) const = 0; 158 159 // Test whether p is double-aligned 160 static bool is_aligned(void* p) { 161 return ::is_aligned(p, sizeof(double)); 162 } 163 164 // Size computations. Sizes are in bytes. 165 size_t capacity() const { return byte_size(bottom(), end()); } 166 virtual size_t used() const = 0; 167 virtual size_t free() const = 0; 168 169 // Iterate over all the ref-containing fields of all objects in the 170 // space, calling "cl.do_oop" on each. Fields in objects allocated by 171 // applications of the closure are not included in the iteration. 172 virtual void oop_iterate(ExtendedOopClosure* cl); 173 174 // Iterate over all objects in the space, calling "cl.do_object" on 175 // each. Objects allocated by applications of the closure are not 176 // included in the iteration. 177 virtual void object_iterate(ObjectClosure* blk) = 0; 178 // Similar to object_iterate() except only iterates over 179 // objects whose internal references point to objects in the space. 180 virtual void safe_object_iterate(ObjectClosure* blk) = 0; 181 182 // Create and return a new dirty card to oop closure. Can be 183 // overridden to return the appropriate type of closure 184 // depending on the type of space in which the closure will 185 // operate. ResourceArea allocated. 186 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, 187 CardTable::PrecisionStyle precision, 188 HeapWord* boundary, 189 bool parallel); 190 191 // If "p" is in the space, returns the address of the start of the 192 // "block" that contains "p". We say "block" instead of "object" since 193 // some heaps may not pack objects densely; a chunk may either be an 194 // object or a non-object. If "p" is not in the space, return NULL. 195 virtual HeapWord* block_start_const(const void* p) const = 0; 196 197 // The non-const version may have benevolent side effects on the data 198 // structure supporting these calls, possibly speeding up future calls. 199 // The default implementation, however, is simply to call the const 200 // version. 201 virtual HeapWord* block_start(const void* p); 202 203 // Requires "addr" to be the start of a chunk, and returns its size. 204 // "addr + size" is required to be the start of a new chunk, or the end 205 // of the active area of the heap. 206 virtual size_t block_size(const HeapWord* addr) const = 0; 207 208 // Requires "addr" to be the start of a block, and returns "TRUE" iff 209 // the block is an object. 210 virtual bool block_is_obj(const HeapWord* addr) const = 0; 211 212 // Requires "addr" to be the start of a block, and returns "TRUE" iff 213 // the block is an object and the object is alive. 214 virtual bool obj_is_alive(const HeapWord* addr) const; 215 216 // Allocation (return NULL if full). Assumes the caller has established 217 // mutually exclusive access to the space. 218 virtual HeapWord* allocate(size_t word_size) = 0; 219 220 // Allocation (return NULL if full). Enforces mutual exclusion internally. 221 virtual HeapWord* par_allocate(size_t word_size) = 0; 222 223 // Mark-sweep-compact support: all spaces can update pointers to objects 224 // moving as a part of compaction. 225 virtual void adjust_pointers() = 0; 226 227 virtual void print() const; 228 virtual void print_on(outputStream* st) const; 229 virtual void print_short() const; 230 virtual void print_short_on(outputStream* st) const; 231 232 233 // Accessor for parallel sequential tasks. 234 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; } 235 236 // IF "this" is a ContiguousSpace, return it, else return NULL. 237 virtual ContiguousSpace* toContiguousSpace() { 238 return NULL; 239 } 240 241 // Debugging 242 virtual void verify() const = 0; 243 }; 244 245 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an 246 // OopClosure to (the addresses of) all the ref-containing fields that could 247 // be modified by virtue of the given MemRegion being dirty. (Note that 248 // because of the imprecise nature of the write barrier, this may iterate 249 // over oops beyond the region.) 250 // This base type for dirty card to oop closures handles memory regions 251 // in non-contiguous spaces with no boundaries, and should be sub-classed 252 // to support other space types. See ContiguousDCTOC for a sub-class 253 // that works with ContiguousSpaces. 254 255 class DirtyCardToOopClosure: public MemRegionClosureRO { 256 protected: 257 ExtendedOopClosure* _cl; 258 Space* _sp; 259 CardTable::PrecisionStyle _precision; 260 HeapWord* _boundary; // If non-NULL, process only non-NULL oops 261 // pointing below boundary. 262 HeapWord* _min_done; // ObjHeadPreciseArray precision requires 263 // a downwards traversal; this is the 264 // lowest location already done (or, 265 // alternatively, the lowest address that 266 // shouldn't be done again. NULL means infinity.) 267 NOT_PRODUCT(HeapWord* _last_bottom;) 268 NOT_PRODUCT(HeapWord* _last_explicit_min_done;) 269 270 // Get the actual top of the area on which the closure will 271 // operate, given where the top is assumed to be (the end of the 272 // memory region passed to do_MemRegion) and where the object 273 // at the top is assumed to start. For example, an object may 274 // start at the top but actually extend past the assumed top, 275 // in which case the top becomes the end of the object. 276 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); 277 278 // Walk the given memory region from bottom to (actual) top 279 // looking for objects and applying the oop closure (_cl) to 280 // them. The base implementation of this treats the area as 281 // blocks, where a block may or may not be an object. Sub- 282 // classes should override this to provide more accurate 283 // or possibly more efficient walking. 284 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); 285 286 public: 287 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl, 288 CardTable::PrecisionStyle precision, 289 HeapWord* boundary) : 290 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), 291 _min_done(NULL) { 292 NOT_PRODUCT(_last_bottom = NULL); 293 NOT_PRODUCT(_last_explicit_min_done = NULL); 294 } 295 296 void do_MemRegion(MemRegion mr); 297 298 void set_min_done(HeapWord* min_done) { 299 _min_done = min_done; 300 NOT_PRODUCT(_last_explicit_min_done = _min_done); 301 } 302 #ifndef PRODUCT 303 void set_last_bottom(HeapWord* last_bottom) { 304 _last_bottom = last_bottom; 305 } 306 #endif 307 }; 308 309 // A structure to represent a point at which objects are being copied 310 // during compaction. 311 class CompactPoint : public StackObj { 312 public: 313 Generation* gen; 314 CompactibleSpace* space; 315 HeapWord* threshold; 316 317 CompactPoint(Generation* g = NULL) : 318 gen(g), space(NULL), threshold(0) {} 319 }; 320 321 // A space that supports compaction operations. This is usually, but not 322 // necessarily, a space that is normally contiguous. But, for example, a 323 // free-list-based space whose normal collection is a mark-sweep without 324 // compaction could still support compaction in full GC's. 325 // 326 // The compaction operations are implemented by the 327 // scan_and_{adjust_pointers,compact,forward} function templates. 328 // The following are, non-virtual, auxiliary functions used by these function templates: 329 // - scan_limit() 330 // - scanned_block_is_obj() 331 // - scanned_block_size() 332 // - adjust_obj_size() 333 // - obj_size() 334 // These functions are to be used exclusively by the scan_and_* function templates, 335 // and must be defined for all (non-abstract) subclasses of CompactibleSpace. 336 // 337 // NOTE: Any subclasses to CompactibleSpace wanting to change/define the behavior 338 // in any of the auxiliary functions must also override the corresponding 339 // prepare_for_compaction/adjust_pointers/compact functions using them. 340 // If not, such changes will not be used or have no effect on the compaction operations. 341 // 342 // This translates to the following dependencies: 343 // Overrides/definitions of 344 // - scan_limit 345 // - scanned_block_is_obj 346 // - scanned_block_size 347 // require override/definition of prepare_for_compaction(). 348 // Similar dependencies exist between 349 // - adjust_obj_size and adjust_pointers() 350 // - obj_size and compact(). 351 // 352 // Additionally, this also means that changes to block_size() or block_is_obj() that 353 // should be effective during the compaction operations must provide a corresponding 354 // definition of scanned_block_size/scanned_block_is_obj respectively. 355 class CompactibleSpace: public Space { 356 friend class VMStructs; 357 friend class CompactibleFreeListSpace; 358 private: 359 HeapWord* _compaction_top; 360 CompactibleSpace* _next_compaction_space; 361 362 // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support. 363 inline size_t adjust_obj_size(size_t size) const { 364 return size; 365 } 366 367 inline size_t obj_size(const HeapWord* addr) const; 368 369 template <class SpaceType> 370 static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN; 371 372 template <class SpaceType> 373 static inline void clear_empty_region(SpaceType* space); 374 375 public: 376 CompactibleSpace() : 377 _compaction_top(NULL), _next_compaction_space(NULL) {} 378 379 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); 380 virtual void clear(bool mangle_space); 381 382 // Used temporarily during a compaction phase to hold the value 383 // top should have when compaction is complete. 384 HeapWord* compaction_top() const { return _compaction_top; } 385 386 void set_compaction_top(HeapWord* value) { 387 assert(value == NULL || (value >= bottom() && value <= end()), 388 "should point inside space"); 389 _compaction_top = value; 390 } 391 392 // Perform operations on the space needed after a compaction 393 // has been performed. 394 virtual void reset_after_compaction() = 0; 395 396 // Returns the next space (in the current generation) to be compacted in 397 // the global compaction order. Also is used to select the next 398 // space into which to compact. 399 400 virtual CompactibleSpace* next_compaction_space() const { 401 return _next_compaction_space; 402 } 403 404 void set_next_compaction_space(CompactibleSpace* csp) { 405 _next_compaction_space = csp; 406 } 407 408 // MarkSweep support phase2 409 410 // Start the process of compaction of the current space: compute 411 // post-compaction addresses, and insert forwarding pointers. The fields 412 // "cp->gen" and "cp->compaction_space" are the generation and space into 413 // which we are currently compacting. This call updates "cp" as necessary, 414 // and leaves the "compaction_top" of the final value of 415 // "cp->compaction_space" up-to-date. Offset tables may be updated in 416 // this phase as if the final copy had occurred; if so, "cp->threshold" 417 // indicates when the next such action should be taken. 418 virtual void prepare_for_compaction(CompactPoint* cp) = 0; 419 // MarkSweep support phase3 420 virtual void adjust_pointers(); 421 // MarkSweep support phase4 422 virtual void compact(); 423 424 // The maximum percentage of objects that can be dead in the compacted 425 // live part of a compacted space ("deadwood" support.) 426 virtual size_t allowed_dead_ratio() const { return 0; }; 427 428 // Some contiguous spaces may maintain some data structures that should 429 // be updated whenever an allocation crosses a boundary. This function 430 // returns the first such boundary. 431 // (The default implementation returns the end of the space, so the 432 // boundary is never crossed.) 433 virtual HeapWord* initialize_threshold() { return end(); } 434 435 // "q" is an object of the given "size" that should be forwarded; 436 // "cp" names the generation ("gen") and containing "this" (which must 437 // also equal "cp->space"). "compact_top" is where in "this" the 438 // next object should be forwarded to. If there is room in "this" for 439 // the object, insert an appropriate forwarding pointer in "q". 440 // If not, go to the next compaction space (there must 441 // be one, since compaction must succeed -- we go to the first space of 442 // the previous generation if necessary, updating "cp"), reset compact_top 443 // and then forward. In either case, returns the new value of "compact_top". 444 // If the forwarding crosses "cp->threshold", invokes the "cross_threshold" 445 // function of the then-current compaction space, and updates "cp->threshold 446 // accordingly". 447 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, 448 HeapWord* compact_top); 449 450 // Return a size with adjustments as required of the space. 451 virtual size_t adjust_object_size_v(size_t size) const { return size; } 452 453 void set_first_dead(HeapWord* value) { _first_dead = value; } 454 void set_end_of_live(HeapWord* value) { _end_of_live = value; } 455 456 protected: 457 // Used during compaction. 458 HeapWord* _first_dead; 459 HeapWord* _end_of_live; 460 461 // Minimum size of a free block. 462 virtual size_t minimum_free_block_size() const { return 0; } 463 464 // This the function is invoked when an allocation of an object covering 465 // "start" to "end occurs crosses the threshold; returns the next 466 // threshold. (The default implementation does nothing.) 467 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) { 468 return end(); 469 } 470 471 // Below are template functions for scan_and_* algorithms (avoiding virtual calls). 472 // The space argument should be a subclass of CompactibleSpace, implementing 473 // scan_limit(), scanned_block_is_obj(), and scanned_block_size(), 474 // and possibly also overriding obj_size(), and adjust_obj_size(). 475 // These functions should avoid virtual calls whenever possible. 476 477 // Frequently calls adjust_obj_size(). 478 template <class SpaceType> 479 static inline void scan_and_adjust_pointers(SpaceType* space); 480 481 // Frequently calls obj_size(). 482 template <class SpaceType> 483 static inline void scan_and_compact(SpaceType* space); 484 485 // Frequently calls scanned_block_is_obj() and scanned_block_size(). 486 // Requires the scan_limit() function. 487 template <class SpaceType> 488 static inline void scan_and_forward(SpaceType* space, CompactPoint* cp); 489 }; 490 491 class GenSpaceMangler; 492 493 // A space in which the free area is contiguous. It therefore supports 494 // faster allocation, and compaction. 495 class ContiguousSpace: public CompactibleSpace { 496 friend class VMStructs; 497 // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class 498 template <typename SpaceType> 499 friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); 500 501 private: 502 // Auxiliary functions for scan_and_forward support. 503 // See comments for CompactibleSpace for more information. 504 inline HeapWord* scan_limit() const { 505 return top(); 506 } 507 508 inline bool scanned_block_is_obj(const HeapWord* addr) const { 509 return true; // Always true, since scan_limit is top 510 } 511 512 inline size_t scanned_block_size(const HeapWord* addr) const; 513 514 protected: 515 HeapWord* _top; 516 HeapWord* _concurrent_iteration_safe_limit; 517 // A helper for mangling the unused area of the space in debug builds. 518 GenSpaceMangler* _mangler; 519 520 GenSpaceMangler* mangler() { return _mangler; } 521 522 // Allocation helpers (return NULL if full). 523 inline HeapWord* allocate_impl(size_t word_size); 524 inline HeapWord* par_allocate_impl(size_t word_size); 525 526 public: 527 ContiguousSpace(); 528 ~ContiguousSpace(); 529 530 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); 531 virtual void clear(bool mangle_space); 532 533 // Accessors 534 HeapWord* top() const { return _top; } 535 void set_top(HeapWord* value) { _top = value; } 536 537 void set_saved_mark() { _saved_mark_word = top(); } 538 void reset_saved_mark() { _saved_mark_word = bottom(); } 539 540 bool saved_mark_at_top() const { return saved_mark_word() == top(); } 541 542 // In debug mode mangle (write it with a particular bit 543 // pattern) the unused part of a space. 544 545 // Used to save the an address in a space for later use during mangling. 546 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; 547 // Used to save the space's current top for later use during mangling. 548 void set_top_for_allocations() PRODUCT_RETURN; 549 550 // Mangle regions in the space from the current top up to the 551 // previously mangled part of the space. 552 void mangle_unused_area() PRODUCT_RETURN; 553 // Mangle [top, end) 554 void mangle_unused_area_complete() PRODUCT_RETURN; 555 556 // Do some sparse checking on the area that should have been mangled. 557 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; 558 // Check the complete area that should have been mangled. 559 // This code may be NULL depending on the macro DEBUG_MANGLING. 560 void check_mangled_unused_area_complete() PRODUCT_RETURN; 561 562 // Size computations: sizes in bytes. 563 size_t capacity() const { return byte_size(bottom(), end()); } 564 size_t used() const { return byte_size(bottom(), top()); } 565 size_t free() const { return byte_size(top(), end()); } 566 567 virtual bool is_free_block(const HeapWord* p) const; 568 569 // In a contiguous space we have a more obvious bound on what parts 570 // contain objects. 571 MemRegion used_region() const { return MemRegion(bottom(), top()); } 572 573 // Allocation (return NULL if full) 574 virtual HeapWord* allocate(size_t word_size); 575 virtual HeapWord* par_allocate(size_t word_size); 576 HeapWord* allocate_aligned(size_t word_size); 577 578 // Iteration 579 void oop_iterate(ExtendedOopClosure* cl); 580 void object_iterate(ObjectClosure* blk); 581 // For contiguous spaces this method will iterate safely over objects 582 // in the space (i.e., between bottom and top) when at a safepoint. 583 void safe_object_iterate(ObjectClosure* blk); 584 585 // Iterate over as many initialized objects in the space as possible, 586 // calling "cl.do_object_careful" on each. Return NULL if all objects 587 // in the space (at the start of the iteration) were iterated over. 588 // Return an address indicating the extent of the iteration in the 589 // event that the iteration had to return because of finding an 590 // uninitialized object in the space, or if the closure "cl" 591 // signaled early termination. 592 HeapWord* object_iterate_careful(ObjectClosureCareful* cl); 593 HeapWord* concurrent_iteration_safe_limit() { 594 assert(_concurrent_iteration_safe_limit <= top(), 595 "_concurrent_iteration_safe_limit update missed"); 596 return _concurrent_iteration_safe_limit; 597 } 598 // changes the safe limit, all objects from bottom() to the new 599 // limit should be properly initialized 600 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) { 601 assert(new_limit <= top(), "uninitialized objects in the safe range"); 602 _concurrent_iteration_safe_limit = new_limit; 603 } 604 605 606 #if INCLUDE_ALL_GCS 607 // In support of parallel oop_iterate. 608 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ 609 void par_oop_iterate(MemRegion mr, OopClosureType* blk); 610 611 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL) 612 #undef ContigSpace_PAR_OOP_ITERATE_DECL 613 #endif // INCLUDE_ALL_GCS 614 615 // Compaction support 616 virtual void reset_after_compaction() { 617 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space"); 618 set_top(compaction_top()); 619 // set new iteration safe limit 620 set_concurrent_iteration_safe_limit(compaction_top()); 621 } 622 623 // Override. 624 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, 625 CardTable::PrecisionStyle precision, 626 HeapWord* boundary, 627 bool parallel); 628 629 // Apply "blk->do_oop" to the addresses of all reference fields in objects 630 // starting with the _saved_mark_word, which was noted during a generation's 631 // save_marks and is required to denote the head of an object. 632 // Fields in objects allocated by applications of the closure 633 // *are* included in the iteration. 634 // Updates _saved_mark_word to point to just after the last object 635 // iterated over. 636 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ 637 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); 638 639 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL) 640 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL 641 642 // Same as object_iterate, but starting from "mark", which is required 643 // to denote the start of an object. Objects allocated by 644 // applications of the closure *are* included in the iteration. 645 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk); 646 647 // Very inefficient implementation. 648 virtual HeapWord* block_start_const(const void* p) const; 649 size_t block_size(const HeapWord* p) const; 650 // If a block is in the allocated area, it is an object. 651 bool block_is_obj(const HeapWord* p) const { return p < top(); } 652 653 // Addresses for inlined allocation 654 HeapWord** top_addr() { return &_top; } 655 HeapWord** end_addr() { return &_end; } 656 657 // Overrides for more efficient compaction support. 658 void prepare_for_compaction(CompactPoint* cp); 659 660 virtual void print_on(outputStream* st) const; 661 662 // Checked dynamic downcasts. 663 virtual ContiguousSpace* toContiguousSpace() { 664 return this; 665 } 666 667 // Debugging 668 virtual void verify() const; 669 670 // Used to increase collection frequency. "factor" of 0 means entire 671 // space. 672 void allocate_temporary_filler(int factor); 673 }; 674 675 676 // A dirty card to oop closure that does filtering. 677 // It knows how to filter out objects that are outside of the _boundary. 678 class FilteringDCTOC : public DirtyCardToOopClosure { 679 protected: 680 // Override. 681 void walk_mem_region(MemRegion mr, 682 HeapWord* bottom, HeapWord* top); 683 684 // Walk the given memory region, from bottom to top, applying 685 // the given oop closure to (possibly) all objects found. The 686 // given oop closure may or may not be the same as the oop 687 // closure with which this closure was created, as it may 688 // be a filtering closure which makes use of the _boundary. 689 // We offer two signatures, so the FilteringClosure static type is 690 // apparent. 691 virtual void walk_mem_region_with_cl(MemRegion mr, 692 HeapWord* bottom, HeapWord* top, 693 ExtendedOopClosure* cl) = 0; 694 virtual void walk_mem_region_with_cl(MemRegion mr, 695 HeapWord* bottom, HeapWord* top, 696 FilteringClosure* cl) = 0; 697 698 public: 699 FilteringDCTOC(Space* sp, ExtendedOopClosure* cl, 700 CardTable::PrecisionStyle precision, 701 HeapWord* boundary) : 702 DirtyCardToOopClosure(sp, cl, precision, boundary) {} 703 }; 704 705 // A dirty card to oop closure for contiguous spaces 706 // (ContiguousSpace and sub-classes). 707 // It is a FilteringClosure, as defined above, and it knows: 708 // 709 // 1. That the actual top of any area in a memory region 710 // contained by the space is bounded by the end of the contiguous 711 // region of the space. 712 // 2. That the space is really made up of objects and not just 713 // blocks. 714 715 class ContiguousSpaceDCTOC : public FilteringDCTOC { 716 protected: 717 // Overrides. 718 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); 719 720 virtual void walk_mem_region_with_cl(MemRegion mr, 721 HeapWord* bottom, HeapWord* top, 722 ExtendedOopClosure* cl); 723 virtual void walk_mem_region_with_cl(MemRegion mr, 724 HeapWord* bottom, HeapWord* top, 725 FilteringClosure* cl); 726 727 public: 728 ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl, 729 CardTable::PrecisionStyle precision, 730 HeapWord* boundary) : 731 FilteringDCTOC(sp, cl, precision, boundary) 732 {} 733 }; 734 735 // A ContigSpace that Supports an efficient "block_start" operation via 736 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with 737 // other spaces.) This is the abstract base class for old generation 738 // (tenured) spaces. 739 740 class OffsetTableContigSpace: public ContiguousSpace { 741 friend class VMStructs; 742 protected: 743 BlockOffsetArrayContigSpace _offsets; 744 Mutex _par_alloc_lock; 745 746 public: 747 // Constructor 748 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, 749 MemRegion mr); 750 751 void set_bottom(HeapWord* value); 752 void set_end(HeapWord* value); 753 754 void clear(bool mangle_space); 755 756 inline HeapWord* block_start_const(const void* p) const; 757 758 // Add offset table update. 759 virtual inline HeapWord* allocate(size_t word_size); 760 inline HeapWord* par_allocate(size_t word_size); 761 762 // MarkSweep support phase3 763 virtual HeapWord* initialize_threshold(); 764 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); 765 766 virtual void print_on(outputStream* st) const; 767 768 // Debugging 769 void verify() const; 770 }; 771 772 773 // Class TenuredSpace is used by TenuredGeneration 774 775 class TenuredSpace: public OffsetTableContigSpace { 776 friend class VMStructs; 777 protected: 778 // Mark sweep support 779 size_t allowed_dead_ratio() const; 780 public: 781 // Constructor 782 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, 783 MemRegion mr) : 784 OffsetTableContigSpace(sharedOffsetArray, mr) {} 785 }; 786 #endif // SHARE_VM_GC_SHARED_SPACE_HPP