/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class PSPermGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
class ParallelOldTracer;
class STWGCTimer;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*.  Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen.  Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen.  Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object.  This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path.  The partial object and everything to its left will be
// copied to another space (call it dest_space_1).  The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
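//
// For example (an illustrative sketch, not drawn from a particular heap):
// suppose region R begins with a partial object P and also contains live
// objects O1 and O2:
//
//   region R:  [ ..P )[ O1 )[ O2 )
//
// If dest_space_1 fills up once P has been placed, R is split at the end of
// P: P (and everything to its left) is targeted at dest_space_1, while O1 and
// O2 are targeted at a different destination.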
//
// Split points are identified during the summary phase, when region
// destinations are computed:  data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero.  The zeroing
// is possible (and necessary) since the partial object will move to a
// different destination space than anything to its right, thus the partial
// object should not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely:
// when the partial object on the split region will be copied across a
// destination region boundary.  This test is made once each time a region is
// filled, and is a simple address comparison, so the overhead is negligible
// (see PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow.  Once
// that happens, the destination space is abandoned and no other data (even
// from other source spaces) is targeted to that destination space.
// Abandoning the destination space may leave a somewhat large unused area at
// the end, if a large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination
// space.  The most general solution would allow data from regions in two
// different source spaces to be "joined" in a single destination region.  At
// the very least, additional code would be required in next_src_region() to
// detect the join and skip to an out-of-order source region.  If the join
// region was also the last destination region to which a split region was
// copied (the most likely case), then additional work would be needed to get
// fill_region() to stop iteration and switch to a new source region at the
// right point.  The basic idea would be to use a fake value for the top of
// the source space; it is doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.

class SplitInfo
{
public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded).  The very first region cannot have a partial object and thus
  // is never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2).  This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of
  // a destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of
  // a destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}
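
// A minimal usage sketch (illustrative only; the index, size and 'dest_addr'
// below are hypothetical, and the real call sites are in the summary and
// compaction phases):
//
//   SplitInfo si;
//   si.clear();
//   si.record(42 /* src_region_idx */, 100 /* partial_obj_size, in words */,
//             dest_addr);
//   if (si.is_split(42)) {
//     // The partial object's new location is si.destination().
//   }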

class SpaceInfo
{
public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection.  Valid only after
  // the summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};
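
// Illustrative flow (a sketch, not the exact code): the summary phase
// computes where the free space in each space will begin after compaction and
// records it via set_new_top(); once compaction is done, the collector makes
// the new top visible to the space itself:
//
//   SpaceInfo& info = ...;           // one entry per space; see _space_info
//   info.set_new_top(computed_top);  // during the summary phase
//   ...
//   info.publish_new_top();          // after compaction: _space->set_top()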

class ParallelCompactData
{
public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a
  // region.
  static const size_t RegionAddrMask;

  class RegionData
  {
  public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase.  This is different from _partial_obj_addr, which is an object
    // that extends onto a source region.  However, the two uses do not
    // overlap in time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that
    // start in this region (words).  This does not include the partial object
    // extending onto the region (if any), or the part of an object that
    // extends onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied.  At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or
    //     the region is empty.  The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed).  After a region
    // has been filled, the destination_count should be set to the completed
    // value (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or
    // has been completed.
    //
    // Minor subtlety:  claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before
    // it can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed() const   { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }
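
    // Illustrative layout of _dc_and_los (the exact shift and mask values are
    // the private constants declared below):
    //
    //   most significant bits           least significant bits
    //   [ destination count ][ live obj size in words        ]
    //        (dc_mask)                  (los_mask)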

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size.  The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
#ifdef ASSERT
    // These enable optimizations that are only partially implemented.  Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif  // #ifdef ASSERT

#ifdef ASSERT
  public:
    uint                 _pushed; // 0 until region is pushed onto a worker's stack
  private:
#endif
  };

public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const { return _region_count; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  // Returns true if the given address is contained within the region.
  bool region_contains(size_t region_index, HeapWord* addr);

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
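
  // Worked example (illustrative; suppose RegionSize is 512 words): for a
  // 300-word object that starts 412 words into region N, add_obj() adds
  // 512 - 412 = 100 words to region N's live_obj_size and records the
  // remaining 200 words as the partial object on region N + 1
  // (partial_obj_size == 200, partial_obj_addr == the object's start).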

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n.  The argument
  // beg must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t      addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord*   region_to_addr(size_t region) const;
  inline HeapWord*   region_to_addr(size_t region, size_t offset) const;
  inline HeapWord*   region_to_addr(const RegionData* region) const;

  inline HeapWord*   region_align_down(HeapWord* addr) const;
  inline HeapWord*   region_align_up(HeapWord* addr) const;
  inline bool        is_region_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the new location of the object p after the compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

  // Return the updated address for the given klass.
  klassOop calc_new_klass(klassOop);

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif  // #ifdef ASSERT

private:
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif  // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  RegionData*     _region_data;
  size_t          _region_count;
};

inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}
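
// A note on the Atomic::add above (reasoning, not new behavior): the
// destination count occupies the most significant bits of _dc_and_los, so
// dc_mask is the two's complement of dc_one.  Adding dc_mask therefore
// decrements the count by one while leaving the live-obj-size bits intact.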

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region.  Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}
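
// Worked example (illustrative; suppose RegionSize is 512 words, so
// Log2RegionSize is 9): for addr == _region_start + 1000,
//
//   addr_to_region_idx(addr) == 1000 >> 9 == 1
//   region_offset(addr)      == 1000 - 512 == 488
//   region_to_addr(1, 488)   == _region_start + 1000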

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big");  // This may be too strict.
  return region_to_addr(region) + offset;
}

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}
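
// For example (illustrative; RegionSize == 512 words): if B is a region
// boundary, then for addr == B + 100, region_align_down(addr) == B and
// region_align_up(addr) == B + 512; is_region_aligned(B) is true.
// region_align_up() works by adding RegionSize - 1 words
// (RegionSizeOffsetMask) and then aligning down.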

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0.  The do_addr()
// methods in subclasses should update the total as words are processed.
// Since only one subclass actually uses this mechanism to terminate
// iteration, the default initial value is > 0.  The implementation is here
// and not in the single subclass that uses it to avoid making is_full()
// virtual, and thus adding a virtual call per live object.

class ParMarkBitMapClosure: public StackObj {
public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

protected:
  inline void decrement_words_remaining(size_t words);

private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining; // Words left to copy.

protected:
  HeapWord*                   _source;          // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}
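
// A minimal subclass sketch (illustrative only; 'CountLiveWordsClosure' is
// hypothetical and not part of this header):
//
//   class CountLiveWordsClosure: public ParMarkBitMapClosure {
//   public:
//     CountLiveWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm):
//       ParMarkBitMapClosure(bm, cm), _live_words(0) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       _live_words += words;             // tally; a real closure would
//       return ParMarkBitMap::incomplete; // copy/update the object here
//     }
//   private:
//     size_t _live_words;
//   };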

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads.  The collection
// includes the tenured generation and the young generation.  The permanent
// generation is collected at the same time as the other two generations but
// the permanent generation is collected by a single GC thread.  The permanent
// generation is collected serially because of the requirement that during the
// processing of a klass AAA, any objects referenced by AAA must already have
// been processed.  This requirement is enforced by a left (lower address) to
// right (higher address) sliding compaction.
//
// There are four phases of the collection.
//
//      - marking phase
//      - summary phase
//      - compacting phase
//      - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//      - mark all the live objects
//      - calculate the destination of each object at the end of the
//        collection
//      - move the objects to their destination
//      - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy().  The
// marking phase is implemented in PSParallelCompact::marking_phase() and does
// a complete marking of the heap.  The summary phase is implemented in
// PSParallelCompact::summary_phase().  The move and update phase is
// implemented in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each
// region is associated a ParallelCompactData::RegionData object.  Each region
// is of a fixed size and typically will contain more than 1 object and may
// have parts of objects at the front and back of the region.
//
// region            -----+---------------------+----------
// objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also compiles the size of the data for all live objects covered
// by the region.  This size includes the part of any live object spanning
// onto the region (part of AAA if it is live) from the front, all live
// objects contained in the region (BBB and/or CCC if they are live), and the
// part of any live objects covered by the region that extends off the region
// (part of DDD if it is live).  The marking phase uses multiple GC threads
// and marking is done in a bit array of type ParMarkBitMap.  The marking of
// the bit map is done atomically as is the accumulation of the size of the
// live objects covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX.  Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX.  The summary phase calculates
// for each region XXX quantities such as
//
//      - the amount of live data at the beginning of a region from an object
//        entering the region.
//      - the location of the first live data on the region
//      - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details.  The summary phase also
// calculates the dense prefix for the compaction.  The dense prefix is a
// portion at the beginning of the space that is not moved.  The objects in
// the dense prefix do need to have their object references updated.  See
// method summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated.  References are not updated
// because it cannot easily be determined if the klass pointer KKK for the
// object AAA has been updated.  KKK likely resides in a region to the left of
// the region containing AAA.  These AAA's have their references updated at
// the end in a clean up phase.  See the method
// PSParallelCompact::update_deferred_objects().  An alternate strategy is
// being investigated for this deferral of updating.
//
// Compaction is done on a region basis.  A region that is ready to be filled
// is put on a ready list and GC threads take regions off the list and fill
// them.  A region is ready to be filled if it is empty of live objects.  Such
// a region may have been initially empty (only contained dead objects) or may
// have had all its live objects copied out already.  A region that compacts
// into itself is also ready for filling.  The ready list is initially filled
// with empty regions and regions compacting into themselves.  There is always
// at least 1 region that can be put on the ready list.  The regions are
// atomically added and removed from the ready list.
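
// A simplified sketch of the control flow (illustrative; see
// invoke_no_policy() for the real sequence, which also handles policy
// decisions, timers, and the clean up work in post_compact()):
//
//   PSParallelCompact::marking_phase(cm, maximum_heap_compaction, &_gc_tracer);
//   PSParallelCompact::summary_phase(cm, maximum_heap_compaction);
//   PSParallelCompact::adjust_roots();
//   PSParallelCompact::compact_perm(cm);
//   PSParallelCompact::compact();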

class PSParallelCompact : AllStatic {
public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;

  typedef enum {
    perm_space_id, old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
  public:
    virtual void do_object(oop p);
    virtual bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  protected:
    template <class T> inline void do_oop_work(T* p);
  public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // Currently unused.
  class FollowRootClosure: public OopsInGenClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FollowStackClosure: public VoidClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };

  class AdjustPointerClosure: public OopsInGenClosure {
  private:
    bool _is_root;
  public:
    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // do not walk from thread stacks to the code cache on this phase
    virtual void do_code_blob(CodeBlob* cb) const { }
  };

  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class FollowRootClosure;
  friend class instanceKlassKlass;
  friend class RefProcTaskProxy;

private:
  static STWGCTimer           _gc_timer;
  static ParallelOldTracer    _gc_tracer;
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc; // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_root_pointer_closure;
  static AdjustPointerClosure _adjust_pointer_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static klassOop _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif  // #ifdef ASSERT

private:
  // Closure accessors
  static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
  static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction,
                            ParallelOldTracer *gc_tracer);
  static void follow_weak_klass_links();
  static void follow_mdo_weak_refs();

  template <class T> static inline void adjust_pointer(T* p, bool is_root);
  static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }

  template <class T>
  static inline void follow_root(ParCompactionManager* cm, T* p);

  // Compute the dense prefix for the designated space.  This is an
  // experimental implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density.  The mean
  // and standard deviation are values saved by
  // initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);
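
  // For reference, the dead wood limit is based on a normal distribution of
  // the space's density; normal_distribution() (defined as an inline near the
  // end of this file) computes
  //
  //   _dwl_first_term * exp(-0.5 * ((density - _dwl_mean) / _dwl_std_dev)^2)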

  // Find the first (left-most) region in the range [beg, end) that has at
  // least dead_words of dead space to the left.  The argument beg must be the
  // first region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region.  Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary.  Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s).  Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects to a survivor space since it's rare that both survivors
  // are non-empty.
  static void provoke_split_fill_survivor(SpaceId id);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots.  Does not adjust addresses in heap.
  static void adjust_roots();

  // Serial code executed in preparation for the compaction phase.
  static void compact_prologue();

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen.  Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc.
  static void reset_millis_since_last_gc();

protected:
#ifdef VALIDATE_MARK_SWEEP
  static GrowableArray<void*>*           _root_refs_stack;
  static GrowableArray<oop> *            _live_oops;
  static GrowableArray<oop> *            _live_oops_moved_to;
  static GrowableArray<size_t>*          _live_oops_size;
  static size_t                          _live_oops_index;
  static size_t                          _live_oops_index_at_perm;
  static GrowableArray<void*>*           _other_refs_stack;
  static GrowableArray<void*>*           _adjusted_pointers;
  static bool                            _pointer_tracking;
  static bool                            _root_tracking;

  // The following arrays are saved since the time of the last GC and
  // assist in tracking down problems where someone has done an errant
  // store into the heap, usually to an oop that wasn't properly
  // handleized across a GC.  If we crash or otherwise fail before the
  // next GC, we can query these arrays to find out the object we had
  // intended to do the store to (assuming it is still alive) and the
  // offset within that object.  Covered under RecordMarkSweepCompaction.
  static GrowableArray<HeapWord*> *      _cur_gc_live_oops;
  static GrowableArray<HeapWord*> *      _cur_gc_live_oops_moved_to;
  static GrowableArray<size_t>*          _cur_gc_live_oops_size;
  static GrowableArray<HeapWord*> *      _last_gc_live_oops;
  static GrowableArray<HeapWord*> *      _last_gc_live_oops_moved_to;
  static GrowableArray<size_t>*          _last_gc_live_oops_size;
#endif

public:
  class MarkAndPushClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  PSParallelCompact();

  // Convenient accessor for Universe::heap().
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations.  This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static klassOop updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Return true if the klass should be updated.
  static inline bool should_update_klass(klassOop k);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix.  For each live object
  // in the region, update its interior references.  For each
  // dead object, fill it with deadwood.  Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end.
  // None of the objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so dead space
  // can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of
  // the new region and return the region index.  Parameter end_addr is the
  // address one beyond the end of the source range just processed.  If
  // necessary, switch to a new source space and set src_space_id (in-out
  // parameter) and src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))).  If the
  // destination count for a region goes to 0 and it needs to be filled,
  // enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }
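
  // How these pieces fit together (a sketch; the task code that drives this
  // loop lives in psParallelCompact.cpp): a GC thread claims a ready region
  // R and calls fill_region(cm, R).  fill_region() uses first_src_addr() to
  // locate the first word destined for R, copies until a source region is
  // exhausted, then advances via next_src_region(); as source regions empty,
  // decrement_destination_counts() makes newly-ready regions available to
  // other threads.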

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap()         { return &_mark_bitmap; }
  static ParallelCompactData& summary_data()  { return _summary_data; }

  static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
  static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  static STWGCTimer* gc_timer() { return &_gc_timer; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

#ifdef VALIDATE_MARK_SWEEP
  static void track_adjusted_pointer(void* p, bool isroot);
  static void check_adjust_pointer(void* p);
  static void track_interior_pointers(oop obj);
  static void check_interior_pointers();

  static void reset_live_oop_tracking(bool at_perm);
  static void register_live_oop(oop p, size_t size);
  static void validate_live_oop(oop p, size_t size);
  static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
  static void compaction_complete();

  // Querying operation of RecordMarkSweepCompaction results.
  // Finds and prints the current base oop and offset for a word
  // within an oop that was live during the last GC.  Helpful for
  // tracking down heap stomps.
  static void print_new_location_of_heap_address(HeapWord* q);
#endif  // #ifdef VALIDATE_MARK_SWEEP

  // Callbacks for class unloading.
  // Update subklass/sibling/implementor links at end of marking.
  static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);

  // Clear unmarked oops in MDOs at the end of marking.
  static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};

inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");
#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
    _root_refs_stack->push(p);
  }
#endif
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
  cm->follow_marking_stacks();
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
      cm->push(obj);
    }
  }
}
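
// Note on the idiom above: mark_obj() marks the object in the bitmap
// atomically (see the marking phase comment earlier in this file), so the
// thread that wins the race takes responsibility for the object exactly once,
// either following its contents immediately (follow_root) or deferring it to
// the marking stack (mark_and_push).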

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL ||  // is forwarding ptr?
           obj->is_shared(),   // never forwarded?
           "should be forwarded");
    // Just always do the update unconditionally?
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
}

template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    if (!Universe::heap()->is_in_reserved(p)) {
      _root_refs_stack->push(p);
    } else {
      _other_refs_stack->push(p);
    }
  }
#endif
  mark_and_push(_compaction_manager, p);
}

inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

inline bool PSParallelCompact::should_update_klass(klassOop k) {
  return ((HeapWord*) k) >= dense_prefix(perm_space_id);
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif  // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the
  // current destination, update the interior oops and the start array and
  // return either full (if the closure is full) or incomplete.  If the object
  // will not fit, return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source().  Interior
  // oops and the start array are not updated.  Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source().  Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination; // Next addr to be written.
};
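
// An illustrative use (hypothetical values; the real driver is
// PSParallelCompact::fill_region()): to fill a destination region, a worker
// might construct
//
//   MoveAndUpdateClosure cl(PSParallelCompact::mark_bitmap(), cm,
//                           PSParallelCompact::start_array(space_id),
//                           dest_addr, ParallelCompactData::RegionSize);
//   cl.set_source(first_word_to_copy);
//
// and then iterate the mark bitmap over the source range, letting do_addr()
// copy each live object and update its interior oops.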

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
{
  _start_array->allocate_block(addr);
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure
{
public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::perm_space_id ||
           space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    CollectedHeap::fill_with_objects(addr, size);
    HeapWord* const end = addr + size;
    do {
      _start_array->allocate_block(addr);
      addr += oop(addr)->size();
    } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

private:
  ObjectStartArray* const _start_array;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP