/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
class ParallelOldTracer;
class STWGCTimer;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. The
// basic idea would be to use a fake value for the top of the source space. It
// is doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.
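
// For example (an illustrative sketch, not taken from the collector itself):
// suppose region R in eden begins with a partial object P and the old gen
// fills up at P. The summary phase would record the split roughly as
//
//   split_info.record(R, partial_obj_size_of_P, destination_of_P);
//
// after which P and everything to its left is targeted at the old gen, while
// the live data to the right of P is targeted at a different destination
// space. Here partial_obj_size_of_P and destination_of_P are placeholder
// names for the values passed to record().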

class SplitInfo
{
public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded). The very first region cannot have a partial object and thus is
  // never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2). This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}

class SpaceInfo
{
public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection. Valid only after the
  // summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if there
  // is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};

class ParallelCompactData
{
public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  static const size_t Log2BlockSize;
  static const size_t BlockSize;
  static const size_t BlockSizeBytes;

  static const size_t BlockSizeOffsetMask;
  static const size_t BlockAddrOffsetMask;
  static const size_t BlockAddrMask;

  static const size_t BlocksPerRegion;
  static const size_t Log2BlocksPerRegion;

  class RegionData
  {
  public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase. This is different from _partial_obj_addr, which is an object that
    // extends onto a source region. However, the two uses do not overlap in
    // time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that start
    // in this region (words). This does not include the partial object
    // extending onto the region (if any), or the part of an object that extends
    // onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied. At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or the
    //     region is empty. The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed). After a region has
    // been filled, the destination_count should be set to the completed value
    // (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // Whether the block table for this region has been filled.
    inline bool blocks_filled() const;

    // Number of times the block table was filled.
    DEBUG_ONLY(inline size_t blocks_filled_count() const;)

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or has
    // been completed.
    //
    // Minor subtlety: claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before it
    // can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed() const   { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }
    inline void set_blocks_filled();

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size. The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
    bool                 _blocks_filled;

#ifdef ASSERT
    size_t               _blocks_filled_count; // Number of block table fills.

    // These enable optimizations that are only partially implemented. Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif // #ifdef ASSERT

#ifdef ASSERT
  public:
    uint                 _pushed; // 0 until region is pushed onto a stack
  private:
#endif
  };

  // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
  // holds an offset, which is the amount of live data in the Region to the left
  // of the first live object that starts in the Block.
  class BlockData
  {
  public:
    typedef unsigned short int blk_ofs_t;

    blk_ofs_t offset() const    { return _offset; }
    void set_offset(size_t val) { _offset = (blk_ofs_t)val; }

  private:
    blk_ofs_t _offset;
  };

public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const       { return _region_count; }
  size_t reserved_byte_size() const { return _reserved_byte_size; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  size_t block_count() const { return _block_count; }
  inline BlockData* block(size_t block_idx) const;
  inline size_t     block(const BlockData* block_ptr) const;

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n. The argument beg
  // must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord* region_to_addr(size_t region) const;
  inline HeapWord* region_to_addr(size_t region, size_t offset) const;
  inline HeapWord* region_to_addr(const RegionData* region) const;

  inline HeapWord* region_align_down(HeapWord* addr) const;
  inline HeapWord* region_align_up(HeapWord* addr) const;
  inline bool is_region_aligned(HeapWord* addr) const;

  // Analogous to region_offset() for blocks.
  size_t block_offset(const HeapWord* addr) const;
  size_t addr_to_block_idx(const HeapWord* addr) const;
  size_t addr_to_block_idx(const oop obj) const {
    return addr_to_block_idx((HeapWord*) obj);
  }
  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
  inline HeapWord* block_to_addr(size_t block) const;
  inline size_t region_to_block_idx(size_t region) const;

  inline HeapWord* block_align_down(HeapWord* addr) const;
  inline HeapWord* block_align_up(HeapWord* addr) const;
  inline bool is_block_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the location of the object after compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif // #ifdef ASSERT

private:
  bool initialize_block_data();
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  size_t          _reserved_byte_size;
  RegionData*     _region_data;
  size_t          _region_count;

  PSVirtualSpace* _block_vspace;
  BlockData*      _block_data;
  size_t          _block_count;
};

inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline bool
ParallelCompactData::RegionData::blocks_filled() const
{
  return _blocks_filled;
}

#ifdef ASSERT
inline size_t
ParallelCompactData::RegionData::blocks_filled_count() const
{
  return _blocks_filled_count;
}
#endif // #ifdef ASSERT

inline void
ParallelCompactData::RegionData::set_blocks_filled()
{
  _blocks_filled = true;
  // Debug builds count the number of times the table was filled.
  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region. Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big"); // This may be too strict.
  return region_to_addr(region) + offset;
}
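
// An illustrative (hypothetical) helper, not used by the collector, showing
// how the address/region conversions above compose: splitting an address into
// a region index plus a word offset and recombining them yields the original
// address.
inline HeapWord*
example_region_round_trip(const ParallelCompactData& data, HeapWord* addr)
{
  const size_t idx    = data.addr_to_region_idx(addr); // region covering addr
  const size_t offset = data.region_offset(addr);      // words past region start
  return data.region_to_addr(idx, offset);             // reconstructs addr
}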

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}

inline size_t
ParallelCompactData::block_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2BlockSize;
}

inline ParallelCompactData::BlockData*
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
{
  return block(addr_to_block_idx(addr));
}

inline HeapWord*
ParallelCompactData::block_to_addr(size_t block) const
{
  assert(block < _block_count, "block out of range");
  return _region_start + (block << Log2BlockSize);
}

inline size_t
ParallelCompactData::region_to_block_idx(size_t region) const
{
  return region << Log2BlocksPerRegion;
}

inline HeapWord*
ParallelCompactData::block_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & BlockAddrMask);
}

inline HeapWord*
ParallelCompactData::block_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return block_align_down(addr + BlockSizeOffsetMask);
}

inline bool
ParallelCompactData::is_block_aligned(HeapWord* addr) const
{
  return block_offset(addr) == 0;
}

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
// do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
// methods in subclasses should update the total as words are processed. Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0. The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object.

class ParMarkBitMapClosure: public StackObj {
 public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

 public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

 protected:
  inline void decrement_words_remaining(size_t words);

 private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining; // Words left to copy.

 protected:
  HeapWord*                   _source; // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}
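
// A minimal sketch of a concrete subclass (hypothetical; the real subclasses
// are MoveAndUpdateClosure, UpdateOnlyClosure and FillClosure below). It only
// consumes live words, using the words_remaining() mechanism described above
// to terminate iteration.
class ExampleCountingClosure: public ParMarkBitMapClosure {
 public:
  ExampleCountingClosure(ParMarkBitMap* bm, ParCompactionManager* cm,
                         size_t words):
    ParMarkBitMapClosure(bm, cm, words) { }

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
    if (words > words_remaining()) {
      return ParMarkBitMap::would_overflow; // object does not fit
    }
    decrement_words_remaining(words);       // account for the live object
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
};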

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation. The permanent generation is
// collected at the same time as the other two generations but the permanent
// generation is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the processing of a
// klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
//      - marking phase
//      - summary phase
//      - compacting phase
//      - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//      - mark all the live objects
//      - calculate the destination of each object at the end of the collection
//      - move the objects to their destination
//      - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData. Each region is of a
// fixed size and typically will contain more than 1 object and may have parts
// of objects at the front and back of the region.
//
// region           -----+---------------------+----------
// objects covered  [ AAA )[ BBB )[ CCC )[ DDD )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also compiles the size of the data for all live objects covered
// by the region. This size includes the part of any live object spanning onto
// the region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live). The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap. The marking of the bit map is
// done atomically as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantities such as
//
//      - the amount of live data at the beginning of a region from an object
//        entering the region.
//      - the location of the first live data on the region
//      - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix is a
// portion at the beginning of the space that is not moved. The objects in the
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them.
// A region is ready to be filled if it is empty of live objects. Such a region
// may have been initially empty (only contained dead objects) or may have had
// all its live objects copied out already. A region that compacts into itself
// is also ready for filling. The ready list is initially filled with empty
// regions and regions compacting into themselves. There is always at least 1
// region that can be put on the ready list. Regions are atomically added to
// and removed from the ready list.
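
// As an illustration of how the summary data is used (a simplified sketch
// that ignores the split case and the block table; see
// ParallelCompactData::calc_new_pointer() for the real code), the new location
// of a live word at addr can be derived from the region covering it:
//
//   RegionData* const rd = summary_data().addr_to_region_ptr(addr);
//   HeapWord* new_addr   = rd->destination() + words_live_to_left;
//
// where words_live_to_left is the region's partial_obj_size() plus the live
// words (counted in the mark bitmap) between the first object that starts in
// the region and addr.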

class PSParallelCompact : AllStatic {
 public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;
  typedef ParallelCompactData::BlockData BlockData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

 public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
   public:
    virtual bool do_object_b(oop p);
  };

  class AdjustPointerClosure: public ExtendedOopClosure {
   public:
    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };

  class AdjustKlassClosure : public KlassClosure {
   public:
    void do_klass(Klass* klass);
  };

  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

 private:
  static STWGCTimer           _gc_timer;
  static ParallelOldTracer    _gc_tracer;
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc; // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static Klass* _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif // #ifdef ASSERT

 public:
  static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }

 private:

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction,
                            ParallelOldTracer *gc_tracer);

  // Compute the dense prefix for the designated space. This is an experimental
  // implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density. The mean and
  // standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) region in the range [beg, end) that has at least
  // dead_words of dead space to the left. The argument beg must be the first
  // region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region. Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary. Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s). Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects to a survivor space since it's rare that both survivors
  // are non-empty.
  static void provoke_split_fill_survivor(SpaceId id);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots. Does not adjust addresses in heap.
  static void adjust_roots();

  DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen. Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

 public:

  PSParallelCompact();

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations. This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors
  static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
    return &_adjust_pointer_closure;
  }
  static KlassClosure* adjust_klass_closure()  { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);

  template <class T> static inline void adjust_pointer(T* p);

  static void follow_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix. For each live object
  // in the region, update its interior references. For each
  // dead object, fill it with deadwood. Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end. None of the
  // objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so
  // dead space can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of the
  // new region and return the region index. Parameter end_addr is the address
  // one beyond the end of the source range just processed. If necessary,
  // switch to a new source space and set src_space_id (in-out parameter) and
  // src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))). If the destination
  // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }

  // Fill in the block table for the specified region.
  static void fill_blocks(size_t region_idx);

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap()         { return &_mark_bitmap; }
  static ParallelCompactData& summary_data()  { return _summary_data; }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  static STWGCTimer* gc_timer() { return &_gc_timer; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

  static void print_on_error(outputStream* st);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif // #ifdef ASSERT
};

inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}
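
// For reference, normal_distribution() above evaluates the Gaussian density
//
//   f(x) = 1 / (sigma * sqrt(2 * pi)) * e^(-(x - mean)^2 / (2 * sigma^2))
//
// at x = density, with _dwl_mean and _dwl_std_dev as mean and sigma; the
// constant factor 1 / (sigma * sqrt(2 * pi)) is presumably what
// initialize_dead_wood_limiter() precomputes into _dwl_first_term.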

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return either
  // full (if the closure is full) or incomplete. If the object will not fit,
  // return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

 protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

 protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination; // Next addr to be written.
};
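
// Typical use (an illustrative sketch; see PSParallelCompact::fill_region()
// for the real driver): construct the closure for a destination range, point
// it at the first source word, and let the mark bitmap drive it over the live
// words of the source range. The variable names below are placeholders.
//
//   MoveAndUpdateClosure closure(mark_bitmap(), cm, start_array(space_id),
//                                destination, words);
//   closure.set_source(first_src_addr);
//   mark_bitmap()->iterate(&closure, src_beg_bit, src_end_bit);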

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

 public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

class FillClosure: public ParMarkBitMapClosure
{
 public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    CollectedHeap::fill_with_objects(addr, size);
    HeapWord* const end = addr + size;
    do {
      _start_array->allocate_block(addr);
      addr += oop(addr)->size();
    } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

 private:
  ObjectStartArray* const _start_array;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP