/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class ContiguousSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
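
  // Illustrative sketch (not part of this interface): a generation records a
  // mark before allocating further into a space, and later uses the predicate
  // declared just below to separate objects allocated since that mark. "sp"
  // stands for any concrete Space subclass; "frontier" is a hypothetical
  // allocation frontier at the time of the mark.
  //
  //   sp->set_saved_mark_word(frontier);
  //   oop obj = ...;  // an object allocated in "sp" after the mark was taken
  //   assert(sp->obj_allocated_since_save_marks(obj),
  //          "objects allocated after the mark lie at or above it");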

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return NULL;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the given space contains the given
  // address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;
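
  // Illustrative sketch (assumes a concrete Space subclass "sp"): the two
  // containment tests above answer different questions; only is_in()
  // consults the allocated part of the space.
  //
  //   HeapWord* p = sp->end() - 1;     // inside the reserved interval
  //   bool r = sp->is_in_reserved(p);  // true: bottom() <= p < end()
  //   bool a = sp->is_in(p);           // may be false: p must also fall
  //                                    // within used_region()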

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
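
// Illustrative sketch (assumes a concrete Space subclass "sp"): the block
// interface above supports a simple linear walk over the allocated part of a
// space, visiting objects and skipping any non-object blocks.
//
//   HeapWord* q = sp->bottom();
//   HeapWord* t = sp->used_region().end();
//   while (q < t) {
//     if (sp->block_is_obj(q)) {
//       process(oop(q));         // "process" is a hypothetical per-object action
//     }
//     q += sp->block_size(q);    // advance to the start of the next block
//   }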

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(0) {}
};
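
// Illustrative sketch (hypothetical names; in practice the card table drives
// this): a remembered-set scan asks a space for a suitable DCTOC and feeds it
// the MemRegion underlying a run of dirty cards.
//
//   DirtyCardToOopClosure* dcto =
//       sp->new_dcto_cl(&oop_cl,                     // closure to apply
//                       CardTableModRefBS::Precise,  // precision style
//                       NULL /* no boundary */);
//   dcto->do_MemRegion(dirty_mr);  // applies oop_cl to refs under dirty cards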

// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following are non-virtual auxiliary functions used by these function templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclasses of CompactibleSpace wanting to change/define the behavior
// in any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them.
// Otherwise, such changes will not be used by, and will have no effect on,
// the compaction operations.
//
// This translates to the following dependencies:
// Overrides/definitions of
//  - scan_limit
//  - scanned_block_is_obj
//  - scanned_block_size
// require override/definition of prepare_for_compaction().
// Similar dependencies exist between
//  - adjust_obj_size  and adjust_pointers()
//  - obj_size         and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const {
    return oop(addr)->size();
  }

 public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }
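
  // Illustrative sketch (from generation code; "first_space" and "cp" are
  // hypothetical): the compaction spaces form a NULL-terminated chain that a
  // full GC walks when one space cannot absorb all surviving objects.
  //
  //   for (CompactibleSpace* cs = first_space;
  //        cs != NULL;
  //        cs = cs->next_compaction_space()) {
  //     cs->prepare_for_compaction(&cp);  // phase 2, declared below
  //   }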

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation simply returns end().)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
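
  // Illustrative sketch (hypothetical sizing, from within a subclass during
  // prepare_for_compaction): a space may leave a bounded amount of
  // "deadwood" in place rather than copy every live object.
  //
  //   size_t allowed_deadspace_words =
  //       capacity() * allowed_dead_ratio() / 100 / HeapWordSize;
  //   if (insert_deadspace(allowed_deadspace_words, q, word_len)) {
  //     // [q, q + word_len) now holds a filler object and will not move;
  //     // allowed_deadspace_words has been debited accordingly.
  //   }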

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size(), and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

  // Frequently calls adjust_obj_size().
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
  // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

 private:
  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return oop(addr)->size();
  }

 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  void set_saved_mark()            { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode, mangle (write a particular bit pattern into)
  // the unused part of a space.

  // Used to save an address in the space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
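
  // Illustrative sketch (debug builds only; "cs" and "limit" are
  // hypothetical): the mangling calls bracket a collection so that reads
  // from memory the collector never reinitialized show up as a recognizable
  // bit pattern.
  //
  //   cs->set_top_for_allocations();         // remember top before the GC
  //   ... collection runs, perhaps using this space as scratch ...
  //   cs->mangle_unused_area();              // re-mangle up to the old top
  //   cs->check_mangled_unused_area(limit);  // sparse check of the result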

  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }

#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);
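
  // Illustrative sketch (assumes a ContiguousSpace* "cs" and a size
  // "word_size" in words): contiguous spaces bump-allocate from top(); the
  // parallel variant performs the bump with an atomic compare-and-swap.
  //
  //   HeapWord* p = cs->par_allocate(word_size);
  //   if (p == NULL) {
  //     // space is full: callers fall back to expansion or a collection
  //   }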

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};
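
// Illustrative sketch (hypothetical subclass "MyDCTOC"): the two signatures
// above let an override dispatch on the closure's static type, so the
// FilteringClosure case can bind to the non-virtual oop-iterate variants.
//
//   void MyDCTOC::walk_mem_region_with_cl(MemRegion mr,
//                                         HeapWord* bottom, HeapWord* top,
//                                         FilteringClosure* cl) {
//     while (bottom < top) {
//       bottom += oop(bottom)->oop_iterate(cl, mr);  // statically bound
//     }
//   }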

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};

// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP