#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)space.hpp 1.149 07/05/29 09:44:14 JVM"
#endif
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;


// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
  OopClosure* cl;
  MemRegion mr;
public:
  void do_oop(oop* p) {
    if (mr.contains(p)) {
      cl->do_oop(p);
    }
  }
  SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
};
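// Example (illustrative sketch only, not part of this interface): wrapping
// an existing closure so that only fields whose addresses fall inside a
// region are visited; "my_cl", "mr", and "sp" are hypothetical names.
//
//   SpaceMemRegionOopsIterClosure filter_cl(my_cl, mr);
//   sp->oop_iterate(&filter_cl);   // fields outside "mr" are skipped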
// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  HeapWord* saved_mark_word() const     { return _saved_mark_word; }
  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization
  virtual void initialize(MemRegion mr, bool clear_space);
  virtual void clear();

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531.
  virtual void mangle_unused_area() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object. For certain kinds of spaces, this might
  // be a potentially expensive operation. To prevent performance
  // problems on account of its inadvertent use in product JVMs, we
  // restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This
  // is most likely to happen in an "upwards" (ascending address) iteration
  // of MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start(const void* p) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
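  // Example (illustrative sketch only): a caller that has arranged mutually
  // exclusive access might allocate like this; "sp" points at some concrete
  // Space subclass and "word_size" is a hypothetical name.
  //
  //   HeapWord* obj = sp->allocate(word_size);
  //   if (obj == NULL) {
  //     // space is full: expand, collect, or report allocation failure
  //   }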
  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL;)
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
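// Example (illustrative sketch only): card-scanning code might apply a dirty
// card to oop closure to a dirty region roughly as follows; "sp", "cl", and
// "dirty_region" are hypothetical names.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(cl, CardTableModRefBS::Precise);
//   dcto_cl->do_MemRegion(dirty_region);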
// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.
  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support).
  virtual int allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to.
  // If there is room in "this" for the object, insert an appropriate
  // forwarding pointer in "q". If not, go to the next compaction space
  // (there must be one, since compaction must succeed -- we go to the
  // first space of the previous generation if necessary, updating "cp"),
  // reset compact_top and then forward. In either case, returns the new
  // value of "compact_top". If the forwarding crosses "cp->threshold",
  // invokes the "cross_threshold" function of the then-current compaction
  // space, and updates "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "the_end" crosses the threshold; it returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
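// Example (illustrative sketch only): a full mark-sweep collection drives
// each CompactibleSpace through the three phases declared above; "gen" and
// "sp" are hypothetical names.
//
//   CompactPoint cp(gen, NULL, NULL);
//   sp->prepare_for_compaction(&cp);  // phase 2: compute forwarding addresses
//   sp->adjust_pointers();            // phase 3: update interior pointers
//   sp->compact();                    // phase 4: copy objects to new homes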
#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  /* Compute the new addresses for the live objects and store them in the \
   * mark word. Used by MarkSweep::mark_sweep_phase2(). \
   */ \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
 \
  /* We're sure to be here before any objects are compacted into this \
   * space, so this is a good time to initialize this: \
   */ \
  set_compaction_top(bottom()); \
 \
  if (cp->space == NULL) { \
    assert(cp->gen != NULL, "need a generation"); \
    assert(cp->threshold == NULL, "just checking"); \
    assert(cp->gen->first_compaction_space() == this, "just checking"); \
    cp->space = cp->gen->first_compaction_space(); \
    compact_top = cp->space->bottom(); \
    cp->space->set_compaction_top(compact_top); \
    cp->threshold = cp->space->initialize_threshold(); \
  } else { \
    compact_top = cp->space->compaction_top(); \
  } \
 \
  /* We allow some amount of garbage towards the bottom of the space, so \
   * we don't start compacting before there is a significant gain to be made. \
   * Occasionally, we want to ensure a full compaction, which is determined \
   * by the MarkSweepAlwaysCompactCount parameter. \
   */ \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
 \
  size_t allowed_deadspace = 0; \
  if (skip_dead) { \
    int ratio = allowed_dead_ratio(); \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
  } \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = scan_limit(); \
 \
  HeapWord* end_of_live = q;    /* One byte beyond the last byte of the last \
                                   live object. */ \
  HeapWord* first_dead = end(); /* The first dead object. */ \
  LiveRange* liveRange = NULL;  /* The current live range, recorded in the \
                                   first header of preceding free area. */ \
  _first_dead = first_dead; \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  while (q < t) { \
    assert(!block_is_obj(q) || \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
           oop(q)->mark()->has_bias_pattern(), \
           "these are the only valid states during a mark sweep"); \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
      /* prefetch beyond q */ \
      Prefetch::write(q, interval); \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */ \
      size_t size = block_size(q); \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
      q += size; \
      end_of_live = q; \
    } else { \
      /* run over all the contiguous dead objects */ \
      HeapWord* end = q; \
      do { \
        /* prefetch beyond end */ \
        Prefetch::write(end, interval); \
        end += block_size(end); \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); \
 \
      /* see if we might want to pretend this object is alive so that \
       * we don't have to compact quite as often. \
       */ \
      if (allowed_deadspace > 0 && q == compact_top) { \
        size_t sz = pointer_delta(end, q); \
        if (insert_deadspace(allowed_deadspace, q, sz)) { \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
          q = end; \
          end_of_live = end; \
          continue; \
        } \
      } \
 \
      /* otherwise, it really is a free region. */ \
 \
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) { \
        liveRange->set_end(q); \
      } \
 \
      /* record the current LiveRange object. \
       * liveRange->start() is overlaid on the mark word. \
       */ \
      liveRange = (LiveRange*)q; \
      liveRange->set_start(end); \
      liveRange->set_end(end); \
 \
      /* see if this is the first dead region. */ \
      if (q < first_dead) { \
        first_dead = q; \
      } \
 \
      /* move on to the next object */ \
      q = end; \
    } \
  } \
 \
  assert(q == t, "just checking"); \
  if (liveRange != NULL) { \
    liveRange->set_end(q); \
  } \
  _end_of_live = end_of_live; \
  if (end_of_live < first_dead) { \
    first_dead = end_of_live; \
  } \
  _first_dead = first_dead; \
 \
  /* save the compaction_top of the compaction space. */ \
  cp->space->set_compaction_top(compact_top); \
}

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
  /* adjust all the interior pointers to point at the new locations of objects \
   * Used by MarkSweep::mark_sweep_phase3() */ \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
 \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. */ \
    HeapWord* end = _first_dead; \
 \
    while (q < end) { \
      /* I originally tried to conjoin "block_start(q) == q" to the \
       * assertion below, but that doesn't work, because you can't \
       * accurately traverse previous objects to get to the current one \
       * after their pointers (including pointers into permGen) have been \
       * updated, until the actual compaction is done. dld, 4/00 */ \
      assert(block_is_obj(q), \
             "should be at block boundaries, and should be looking at objs"); \
 \
      debug_only(MarkSweep::track_interior_pointers(oop(q))); \
 \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
 \
      debug_only(MarkSweep::check_interior_pointers()); \
 \
      debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
 \
      q += size; \
    } \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ This is funky. Using this to read the previously written \
       * LiveRange. See also use below. */ \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  debug_only(HeapWord* prev_q = NULL); \
  while (q < t) { \
    /* prefetch beyond q */ \
    Prefetch::write(q, interval); \
    if (oop(q)->is_gc_marked()) { \
      /* q is alive */ \
      debug_only(MarkSweep::track_interior_pointers(oop(q))); \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      debug_only(MarkSweep::check_interior_pointers()); \
      debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
      debug_only(prev_q = q); \
      q += size; \
    } else { \
      /* q is not a live object, so its mark should point at the next \
       * live object */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } \
  } \
 \
  assert(q == t, "just checking"); \
}

#define SCAN_AND_COMPACT(obj_size) { \
  /* Copy all live objects to their new location \
   * Used by MarkSweep::mark_sweep_phase4() */ \
 \
  HeapWord* q = bottom(); \
  HeapWord* const t = _end_of_live; \
  debug_only(HeapWord* prev_q = NULL); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    debug_only( \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. */ \
    HeapWord* const end = _first_dead; \
 \
    while (q < end) { \
      size_t size = obj_size(q); \
      assert(!oop(q)->is_gc_marked(), \
             "should be unmarked (special dense prefix handling)"); \
      debug_only(MarkSweep::live_oop_moved_to(q, size, q)); \
      debug_only(prev_q = q); \
      q += size; \
    } \
    ) /* debug_only */ \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ Funky */ \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx scan_interval = PrefetchScanIntervalInBytes; \
  const intx copy_interval = PrefetchCopyIntervalInBytes; \
  while (q < t) { \
    if (!oop(q)->is_gc_marked()) { \
      /* mark is pointer to next marked oop */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } else { \
      /* prefetch beyond q */ \
      Prefetch::read(q, scan_interval); \
 \
      /* size and destination */ \
      size_t size = obj_size(q); \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
 \
      /* prefetch beyond compaction_top */ \
      Prefetch::write(compaction_top, copy_interval); \
 \
      /* copy object and reinit its mark */ \
      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size); \
      oop(compaction_top)->init_mark(); \
      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
 \
      debug_only(prev_q = q); \
      q += size; \
    } \
  } \
 \
  /* Reset space after compaction is complete */ \
  reset_after_compaction(); \
  /* We do this clear, below, since it has overloaded meanings for some */ \
  /* space subtypes. For example, OffsetTableContigSpace's that were */ \
  /* compacted into will have had their offset table thresholds updated */ \
  /* continuously, but those that weren't need to have their thresholds */ \
  /* re-initialized. Also mangles unused area for debugging. */ \
  if (is_empty()) { \
    clear(); \
  } else { \
    if (ZapUnusedHeapArea) mangle_unused_area(); \
  } \
}
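// Example (illustrative sketch only): a CompactibleSpace subclass instantiates
// the three macros above in its phase methods, supplying its own notions of
// "block" and object size; the ContiguousSpace definitions in space.cpp take
// roughly this form.
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }
//   void ContiguousSpace::adjust_pointers() { SCAN_AND_ADJUST_POINTERS(adjust_obj_size); }
//   void ContiguousSpace::compact()         { SCAN_AND_COMPACT(obj_size); }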
// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top();    }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  virtual void clear();

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  void mangle_unused_area();
  void mangle_region(MemRegion mr);

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
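  // Example (illustrative sketch only): the block contract above lets a
  // caller step through blocks; "addr" is a hypothetical address within
  // the space.
  //
  //   HeapWord* q    = block_start(addr);   // start of the block holding addr
  //   HeapWord* next = q + block_size(q);   // start of the following block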
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

public:
  EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; }
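  // Example (illustrative sketch only, assuming a periodic slow-path action
  // such as allocation sampling; "take_sample" and "step" are hypothetical):
  // when allocation reaches _soft_end, the slow path can act and then widen
  // the limit using the accessors declared below.
  //
  //   take_sample();
  //   set_soft_end(MIN2(soft_end() + step, end()));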
  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear();

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces). This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear();

  inline HeapWord* block_start(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
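// Example (illustrative sketch only): an old generation typically constructs
// its space over a block offset array covering the reserved region;
// "reserved_mr" and "committed_mr" are hypothetical names.
//
//   BlockOffsetSharedArray* boa =
//     new BlockOffsetSharedArray(reserved_mr,
//                                heap_word_size(committed_mr.byte_size()));
//   TenuredSpace* old_space = new TenuredSpace(boa, committed_mr);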