/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space                 -- an abstract base class describing a heap area
//   - CompactibleSpace    -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - G1OffsetTableContigSpace -- G1 version of OffsetTableContigSpace
//     - ContiguousSpace   -- a compactible space in which all free space
//                            is contiguous
//       - EdenSpace       -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                            that allows "fast" block_start calls
//         - TenuredSpace  -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential-tasks-done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
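  // For example (an illustrative sketch, not part of this header), a GC
  // worker thread might claim and process sub-tasks of this space like:
  //
  //   SequentialSubTasksDone* pst = space->par_seq_tasks();
  //   uint nth_task = 0;
  //   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  //     ... process sub-task number nth_task of the space ...
  //   }
  //   pst->all_tasks_completed();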
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // The default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part of
  // an allocated object. For certain kinds of spaces, this might be a
  // potentially expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs, we restrict its
  // use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
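  // For example (illustrative figures): with bottom() == 0x1000,
  // top() == 0x2000 and end() == 0x3000, the address 0x2800 lies in the
  // reserved region but is not part of any allocated object, so:
  //
  //   space->is_in_reserved(p)  // true for any p in [bottom(), end())
  //   space->is_in(p)           // true only for p within used_region()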
  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
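  // Illustrative use of the two entry points (a sketch, not part of this
  // header):
  //
  //   HeapWord* p = space->allocate(word_size);      // caller serializes
  //   HeapWord* q = space->par_allocate(word_size);  // space serializes
  //   // Both return NULL when the request cannot be satisfied.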
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
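// Typical use (an illustrative sketch; the closure and region names are
// assumptions): after the card table identifies a dirty region, a
// remembered-set scan might do
//
//   DirtyCardToOopClosure* dcto_cl =
//     space->new_dcto_cl(&oop_cl, CardTableModRefBS::Precise, boundary);
//   dcto_cl->do_MemRegion(dirty_mr);  // applies oop_cl to refs in dirty_mr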
// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* _gen) :
    gen(_gen), space(NULL), threshold(0) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support).
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }
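  // A sketch of how the threshold is intended to be used during the
  // forwarding pass (illustrative; the local names are assumptions):
  //
  //   HeapWord* threshold = space->initialize_threshold();
  //   ...
  //   compact_top += obj_size;
  //   if (compact_top > threshold)
  //     threshold = space->cross_threshold(compact_top - obj_size,
  //                                        compact_top);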
  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (and "cp->space"
  // must equal "this"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

  // Functions for scan_and_{forward,adjust_pointers,compact} support.
  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    // Perform virtual call. This is currently not a problem since this
    // function is only used in an assert (from scan_and_adjust_pointers).
    return block_is_obj(addr);
  }

  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const {
    return oop(addr)->size();
  }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size(), and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

  // Frequently calls adjust_obj_size(). (Asserts on scanned_block_is_obj().)
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};

class GenSpaceMangler;
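// The scan_and_* templates above are instantiated with a space type that
// provides the named hooks non-virtually. Roughly (an illustrative
// sketch, not a real class):
//
//   class MySpace : public CompactibleSpace {
//     HeapWord* scan_limit() const;                        // scan end
//     bool scanned_block_is_obj(const HeapWord* p) const;  // block test
//     size_t scanned_block_size(const HeapWord* p) const;  // block size
//   };
//
//   void MySpace::prepare_for_compaction(CompactPoint* cp) {
//     scan_and_forward(this, cp);  // devirtualized via the template
//   }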
// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top();    }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
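  // Because all allocated words lie in [bottom(), top()), object
  // iteration reduces to a linear walk. A sketch of the underlying loop
  // (illustrative only):
  //
  //   for (HeapWord* p = bottom(); p < top(); p += oop(p)->size()) {
  //     blk->do_object(oop(p));
  //   }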
  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit must be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }


#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;
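  // Typical generation-level use of the save-marks protocol (an
  // illustrative sketch; the closure name "cl" is an assumption):
  //
  //   space->set_saved_mark();                    // at a GC boundary
  //   ... promotions allocate into the space ...
  //   space->oop_since_save_marks_iterate_v(&cl); // scan only new objects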
  // Used to increase collection frequency. A "factor" of 0 means the
  // entire space.
  void allocate_temporary_filler(int factor);

  // Functions for scan_and_{forward,adjust_pointers,compact} support.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return oop(addr)->size();
  }
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);
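  // A sketch of the soft-limit protocol (illustrative; "step" is a
  // hypothetical increment, not a field of this class): when a fast-path
  // allocation would cross soft_end(), the slow path can take its
  // periodic action and then advance the limit, e.g.
  //
  //   if (top() + word_size > soft_end()) {
  //     ... perform the deferred action (sampling, polling, etc.) ...
  //     set_soft_end(MIN2(soft_end() + step, end()));
  //   }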
  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP