/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space                 -- an abstract base class describing a heap area
//   - CompactibleSpace    -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace   -- a compactible space in which all free space
//                            is contiguous
//       - EdenSpace       -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                            that allows "fast" block_start calls
//         - TenuredSpace  -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries, and
//            bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save_marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // The default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531.
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part of
  // an allocated object. For certain kinds of spaces, this might be a
  // potentially expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs, we restrict its
  // use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned.
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
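
  // Illustrative sketch only: a contiguous subclass typically implements
  // par_allocate() as a lock-free bump-the-pointer loop. (top() and
  // top_addr() live in ContiguousSpace, not in this abstract class; the
  // Atomic::cmpxchg_ptr primitive is the runtime's CAS.)
  //
  //   HeapWord* obj;
  //   do {
  //     obj = top();
  //     if (pointer_delta(end(), obj) < word_size) return NULL;  // full
  //   } while ((HeapWord*)Atomic::cmpxchg_ptr(obj + word_size,
  //                                           top_addr(), obj) != obj);
  //   return obj;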
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again; NULL means infinity).
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
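
// Illustrative example (the caller code below is hypothetical): a card-table
// scan typically obtains a closure from the space itself and applies it to
// each dirty region, e.g. with ObjHeadPreciseArray precision:
//
//   DirtyCardToOopClosure* dcto =
//     sp->new_dcto_cl(cl, CardTableModRefBS::ObjHeadPreciseArray);
//   dcto->do_MemRegion(dirty_mr);  // applies cl to oops on the dirty cards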
// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* _gen) :
    gen(_gen), space(NULL), threshold(0) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.
  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // part of the compacted space ("deadwood" support).
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }
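
  // Illustrative sketch: forward() (below) is expected to keep the
  // threshold current in roughly this way (simplified; exact bookkeeping
  // is up to the implementation):
  //
  //   if (compact_top > cp->threshold) {
  //     cp->threshold =
  //       cp->space->cross_threshold(compact_top - size, compact_top);
  //   }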
  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top();    }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }
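
  // Illustrative example: the saved-mark protocol lets a collector visit
  // only objects allocated after a checkpoint:
  //
  //   sp->set_saved_mark();              // checkpoint: remember top()
  //   ... mutator allocations ...
  //   // objects in [saved_mark_word(), top()) are the new ones, and
  //   // obj_allocated_since_save_marks(obj) is true exactly for them.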
  // In debug mode, mangle (write a particular bit pattern over)
  // the unused part of the space.

  // Used to save an address in the space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end).
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }
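
  // Illustrative example (hypothetical caller code): interpreting the
  // result of a careful iteration:
  //
  //   HeapWord* stop = sp->object_iterate_careful(cl);
  //   if (stop != NULL) {
  //     // Iteration ended early: either an uninitialized object was
  //     // found at "stop" or cl signaled termination; a later pass
  //     // may resume from there.
  //   }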

#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // Set the new iteration safe limit.
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
  #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
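
  // For exposition only: each application of the macro above declares one
  // specialized method; for a closure type OopClosureType with non-virtual
  // suffix "_nv" it yields a declaration of the form
  //
  //   void oop_since_save_marks_iterate_nv(OopClosureType* blk);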

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. A "factor" of 0 means the
  // entire space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a filtering closure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};
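
// Illustrative example (hypothetical slow-path caller): a NULL result from
// eden allocation need not mean the space is full while soft_end() < end():
//
//   HeapWord* obj = eden->par_allocate(word_size);
//   if (obj == NULL && eden->soft_end() != eden->end()) {
//     // Only the soft limit was hit: take the policy action (e.g. start
//     // a concurrent cycle), advance _soft_end, and retry the allocation.
//   }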

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces). This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration.

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP