/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public OopClosure {
 private:
  OopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
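
// Illustrative sketch (not part of this header): a caller that wants an
// existing OopClosure applied only to fields lying inside a given region
// can wrap it as above. "base_cl", "mr" and "sp" are hypothetical names.
//
//   OopClosure* base_cl = ...;          // closure to be restricted
//   MemRegion   mr      = ...;          // filtering region
//   SpaceMemRegionOopsIterClosure filtered(base_cl, mr);
//   sp->oop_iterate(&filtered);         // only fields within "mr" reach base_cl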

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation, size computation, and GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // The default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531.
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part of
  // an allocated object. For certain kinds of spaces, this might be a
  // potentially expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs, we restrict its
  // use to assertion checks only.
  virtual bool is_in(const void* p) const;

  // Returns true iff the space's reserved memory contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned.
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
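
  // Illustrative sketch (not part of this header): the block protocol above
  // supports a linear walk of a space's used region, whether or not every
  // block is an object. "sp" is a hypothetical Space*.
  //
  //   HeapWord* cur = sp->bottom();
  //   while (cur < sp->used_region().end()) {
  //     size_t sz = sp->block_size(cur);   // cur must be a block start
  //     if (sp->block_is_obj(cur)) {
  //       // oop(cur) is a (possibly dead) object of sz words
  //     }
  //     cur += sz;                         // next block start
  //   }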

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};
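
// Illustrative sketch (not part of this header): the allocate/par_allocate
// contract. Callers must treat NULL as "space full" and fall back to a
// slower path; "sp" and "word_size" are hypothetical.
//
//   HeapWord* obj = sp->par_allocate(word_size);  // thread-safe attempt
//   if (obj == NULL) {
//     // space exhausted: the caller expands the space, collects, or tries
//     // another space; the Space itself never grows or throws here.
//   }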

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again; NULL means infinity).
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
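
// Illustrative sketch (not part of this header): how card-table scanning
// code typically drives a DirtyCardToOopClosure. The space picks a closure
// type appropriate to itself via new_dcto_cl; "sp", "oop_cl", "precision",
// "boundary" and "dirty_mr" are hypothetical.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, precision, boundary);  // ResourceArea allocated
//   dcto_cl->do_MemRegion(dirty_mr);  // applies oop_cl to ref fields that a
//                                     // dirty card in dirty_mr may cover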

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final "cp->compaction_space"
  // up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();
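
  // Illustrative sketch (not part of this header): a serial full GC drives
  // the three phases above over each compactible space, roughly:
  //
  //   CompactPoint cp(gen, NULL, NULL);      // NULL space: start from the
  //   space->prepare_for_compaction(&cp);    //   generation's first space
  //   ... // after all spaces are prepared and roots are adjusted:
  //   space->adjust_pointers();              // phase3: fix interior pointers
  //   space->compact();                      // phase4: slide live objects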

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the compaction space ("space",
  // which must equal "this"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
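
// Illustrative sketch (not part of this header): the deadwood budget that
// SCAN_AND_FORWARD (below) derives from allowed_dead_ratio(). For example,
// a ratio of 4 on a 64 MB space tolerates roughly 2.5 MB of dead words near
// the bottom of the space before compaction starts sliding objects, trading
// some fragmentation for cheaper full collections.
//
//   const size_t ratio = allowed_dead_ratio();        // e.g. 4 (percent)
//   size_t allowed_deadspace_words =
//       (capacity() * ratio / 100) / HeapWordSize;    // budget in words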

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the   \
   * mark word. Used by MarkSweep::mark_sweep_phase2().                     \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this       \
   * space, so this is a good time to initialize this:                      \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so    \
   * we don't start compacting before there is a significant gain to be     \
   * made. Occasionally, we want to ensure a full compaction, which is      \
   * determined by the MarkSweepAlwaysCompactCount parameter.               \
   */                                                                        \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord* end_of_live = q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord* first_dead = end(); /* The first dead object. */                 \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that       \
       * we don't have to compact quite as often.                           \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                          \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                               \
       * liveRange->start() is overlaid on the mark word.                   \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                           \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                      \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                    \
  cp->space->set_compaction_top(compact_top);                                \
}
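
// Illustrative sketch (not part of this header): a space instantiates the
// macro by supplying its own scan limit and block callbacks, which are
// substituted textually. For a plain CompactibleSpace the walk covers
// [bottom, end) and must test each block:
//
//   void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
//   }
//
// A ContiguousSpace can instead walk only [bottom, top) and assume every
// block is an object, which is the faster configuration.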

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
  /* adjust all the interior pointers to point at the new locations of      \
   * objects. Used by MarkSweep::mark_sweep_phase3(). */                     \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */\
                                                                             \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    /* we have a chunk of the space which hasn't moved and we've            \
     * reinitialized the mark word during the previous pass, so we can't    \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* end = _first_dead;                                             \
                                                                             \
    while (q < end) {                                                        \
      /* I originally tried to conjoin "block_start(q) == q" to the         \
       * assertion below, but that doesn't work, because you can't          \
       * accurately traverse previous objects to get to the current one     \
       * after their pointers (including pointers into permGen) have been   \
       * updated, until the actual compaction is done.  dld, 4/00 */         \
      assert(block_is_obj(q),                                                \
             "should be at block boundaries, and should be looking at objs");\
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
                                                                             \
      /* point all the oops to the new location */                          \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
                                                                             \
      q += size;                                                             \
    }                                                                        \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ This is funky.  Using this to read the previously written      \
       * LiveRange.  See also use below. */                                  \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  debug_only(HeapWord* prev_q = NULL);                                       \
  while (q < t) {                                                            \
    /* prefetch beyond q */                                                 \
    Prefetch::write(q, interval);                                            \
    if (oop(q)->is_gc_marked()) {                                            \
      /* q is alive */                                                      \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
      /* point all the oops to the new location */                          \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    } else {                                                                 \
      /* q is not a live object, so its mark should point at the next       \
       * live object */                                                     \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
}
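
// Illustrative sketch (not part of this header): phase3 instantiation along
// the lines of the corresponding space.cpp; the empty-space early-out is an
// assumption of this sketch, reflecting that _end_of_live is only meaningful
// after prepare_for_compaction has run over a non-empty space.
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) {
//       return;  // nothing to do
//     }
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }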

#define SCAN_AND_COMPACT(obj_size) {                                         \
  /* Copy all live objects to their new location.                           \
   * Used by MarkSweep::mark_sweep_phase4(). */                              \
                                                                             \
  HeapWord*       q = bottom();                                              \
  HeapWord* const t = _end_of_live;                                          \
  debug_only(HeapWord* prev_q = NULL);                                       \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    debug_only(                                                              \
    /* we have a chunk of the space which hasn't moved and we've            \
     * reinitialized the mark word during the previous pass, so we can't    \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* const end = _first_dead;                                       \
                                                                             \
    while (q < end) {                                                        \
      size_t size = obj_size(q);                                             \
      assert(!oop(q)->is_gc_marked(),                                        \
             "should be unmarked (special dense prefix handling)");          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));    \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
    )  /* debug_only */                                                      \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ Funky */                                                       \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
  while (q < t) {                                                            \
    if (!oop(q)->is_gc_marked()) {                                           \
      /* mark is pointer to next marked oop */                              \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    } else {                                                                 \
      /* prefetch beyond q */                                               \
      Prefetch::read(q, scan_interval);                                      \
                                                                             \
      /* size and destination */                                            \
      size_t size = obj_size(q);                                             \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                             \
      /* prefetch beyond compaction_top */                                  \
      Prefetch::write(compaction_top, copy_interval);                        \
                                                                             \
      /* copy object and reinit its mark */                                 \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,         \
                                                            compaction_top));\
      assert(q != compaction_top, "everything in this pass should be moving");\
      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
      oop(compaction_top)->init_mark();                                      \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                             \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
  }                                                                          \
                                                                             \
  /* Let's remember if we were empty before we did the compaction. */       \
  bool was_empty = used_region().is_empty();                                 \
  /* Reset space after compaction is complete */                            \
  reset_after_compaction();                                                  \
  /* We do this clear, below, since it has overloaded meanings for some */  \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */  \
  /* compacted into will have had their offset table thresholds updated */  \
  /* continuously, but those that weren't need to have their thresholds */  \
  /* re-initialized.  Also mangles unused area for debugging.           */  \
  if (used_region().is_empty()) {                                            \
    if (!was_empty) clear(SpaceDecorator::Mangle);                           \
  } else {                                                                   \
    if (ZapUnusedHeapArea) mangle_unused_area();                             \
  }                                                                          \
}
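
// Illustrative sketch (not part of this header): phase4 instantiation, where
// "obj_size" is the space's own size helper, substituted textually into the
// macro, roughly as in the corresponding space.cpp:
//
//   void CompactibleSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }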

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
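
  // Illustrative sketch (not part of this header): par_allocate_impl is
  // conventionally a lock-free bump-pointer loop over _top, along these
  // lines (simplified; the real implementation lives out-of-line):
  //
  //   do {
  //     HeapWord* obj = top();
  //     if (pointer_delta(end_value, obj) < word_size) return NULL; // full
  //     HeapWord* new_top = obj + word_size;
  //     HeapWord* result =
  //         (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
  //     if (result == obj) return obj;   // we won the race
  //   } while (true);                    // lost the race; retry with new top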

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};
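
// Illustrative sketch (not part of this header): top_addr()/end_addr() above
// expose the raw allocation fields so that the interpreter and compiled code
// can inline the bump-pointer fast path directly against them, roughly:
//
//   HeapWord** top_p = space->top_addr();   // *top_p is the current top
//   HeapWord** end_p = space->end_addr();   // *end_p is the hard limit
//   // fast path: new_top = *top_p + size; if new_top <= *end_p, CAS the top
//   // slow path: call into the VM when the bump would cross *end_p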

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};
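
// Illustrative sketch (not part of this header): why ConcEdenSpace overrides
// par_allocate. Since another thread may move _soft_end concurrently, the
// allocation loop must re-read the soft limit on every retry instead of
// caching it; "allocate_against" is a hypothetical helper that bounds the
// bump-pointer attempt by the given limit.
//
//   do {
//     HeapWord* obj = allocate_against(soft_end());  // hypothetical helper
//     if (obj != NULL) return obj;                   // fast path succeeded
//     if (soft_end() == end()) return NULL;          // hard limit reached
//     // otherwise the soft limit is being adjusted; retry
//   } while (true);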

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces). This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_VM_MEMORY_SPACE_HPP