/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_HPP

#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
class ParallelOldTracer;
class STWGCTimer;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing
// is possible (and necessary) since the partial object will move to a
// different destination space than anything to its right, thus the partial
// object should not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
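// A worked example (illustrative numbers only): suppose eden region 7 begins
// with the 100-word tail of an object that starts in region 6, and the old
// gen would overflow partway through copying eden. The summary phase records
// the split with
//
//   split_info.record(7, 100, destination_of_partial_obj);
//
// (destination_of_partial_obj being wherever the tail is headed), zeroes
// region 7's partial_obj_size in the summary data, and summarizes the data to
// the right of the partial object into a different destination space. During
// compaction, first_src_addr() consults this SplitInfo only when the 100-word
// tail would be copied across a destination region boundary.
//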
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. The
// basic idea would be to use a fake value for the top of the source space. It
// is doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.

class SplitInfo
{
public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded). The very first region cannot have a partial object and thus is
  // never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2). This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}

class SpaceInfo
{
public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection. Valid only after the
  // summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};

class ParallelCompactData
{
public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  static const size_t Log2BlockSize;
  static const size_t BlockSize;
  static const size_t BlockSizeBytes;

  static const size_t BlockSizeOffsetMask;
  static const size_t BlockAddrOffsetMask;
  static const size_t BlockAddrMask;

  static const size_t BlocksPerRegion;
  static const size_t Log2BlocksPerRegion;

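  // How the masks relate to one another (a sketch assuming Log2RegionSize
  // were 4, i.e. a 16-word region; the real constants are defined in the
  // .cpp file):
  //
  //   RegionSize           = 1 << Log2RegionSize             // 16 words
  //   RegionSizeBytes      = RegionSize << LogHeapWordSize
  //   RegionSizeOffsetMask = RegionSize - 1                  // 0x0f
  //   RegionAddrOffsetMask = RegionSizeBytes - 1
  //   RegionAddrMask       = ~RegionAddrOffsetMask
  //
  // So for an address p, (size_t(p) & RegionAddrMask) is the start of p's
  // region, and (size_t(p) & RegionAddrOffsetMask) >> LogHeapWordSize is p's
  // word offset within it (see region_align_down() and region_offset()
  // below). The Block* constants follow the same pattern at block granularity.
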
  class RegionData
  {
  public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase. This is different from _partial_obj_addr, which is an object that
    // extends onto a source region. However, the two uses do not overlap in
    // time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that start
    // in this region (words). This does not include the partial object
    // extending onto the region (if any), or the part of an object that
    // extends onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied. At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or
    //     the region is empty. The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed). After a region has
    // been filled, the destination_count should be set to the completed value
    // (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // Whether the block table for this region has been filled.
    inline bool blocks_filled() const;

    // Number of times the block table was filled.
    DEBUG_ONLY(inline size_t blocks_filled_count() const;)

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or has
    // been completed.
    //
    // Minor subtlety: claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before it
    // can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed()   const { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }
    inline void set_blocks_filled();

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size. The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
    bool        volatile _blocks_filled;

#ifdef ASSERT
    size_t               _blocks_filled_count;  // Number of block table fills.

    // These enable optimizations that are only partially implemented. Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif  // #ifdef ASSERT

#ifdef ASSERT
   public:
    uint                 _pushed;  // 0 until region is pushed onto a stack
   private:
#endif
  };
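
  // A sketch of the _dc_and_los life cycle during compaction (symbolic; the
  // actual dc_* constants are defined in the .cpp file). The destination
  // count occupies the high-order bits, the live obj size the low-order bits:
  //
  //   after summary:  (count << dc_shift) | live_obj_size,  count in {0,1,2}
  //   claiming:       claim() cmpxchg-es from live_obj_size alone (count 0)
  //                   to dc_claimed | live_obj_size
  //   filling done:   set_completed() stores dc_completed | live_obj_size
  //
  // decrement_destination_count() moves a region with count 1 or 2 one step
  // toward 0; once the count reaches 0 the region is available() and may be
  // claimed.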

  // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
  // holds an offset, which is the amount of live data in the Region to the
  // left of the first live object that starts in the Block.
  class BlockData
  {
  public:
    typedef unsigned short int blk_ofs_t;

    blk_ofs_t offset() const    { return _offset; }
    void set_offset(size_t val) { _offset = (blk_ofs_t)val; }

  private:
    blk_ofs_t _offset;
  };
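
  // For example (an illustrative sketch, not the real constants): with
  // 16-word blocks, an object starting at word 40 of its region lies in block
  // 2. If blocks 0 and 1 together hold 25 live words, block 2's offset is 25,
  // so a new-location query for that object only has to count live bits from
  // the start of block 2 rather than from the start of the region (see
  // calc_new_pointer() below and PSParallelCompact::fill_blocks()).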

public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const { return _region_count; }
  size_t reserved_byte_size() const { return _reserved_byte_size; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  size_t block_count() const { return _block_count; }
  inline BlockData* block(size_t block_idx) const;
  inline size_t     block(const BlockData* block_ptr) const;

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n. The argument beg
  // must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t      addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord*   region_to_addr(size_t region) const;
  inline HeapWord*   region_to_addr(size_t region, size_t offset) const;
  inline HeapWord*   region_to_addr(const RegionData* region) const;

  inline HeapWord*   region_align_down(HeapWord* addr) const;
  inline HeapWord*   region_align_up(HeapWord* addr) const;
  inline bool        is_region_aligned(HeapWord* addr) const;

  // Analogous to region_offset() for blocks.
  size_t block_offset(const HeapWord* addr) const;
  size_t addr_to_block_idx(const HeapWord* addr) const;
  size_t addr_to_block_idx(const oop obj) const {
    return addr_to_block_idx((HeapWord*) obj);
  }
  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
  inline HeapWord*  block_to_addr(size_t block) const;
  inline size_t     region_to_block_idx(size_t region) const;

  inline HeapWord*  block_align_down(HeapWord* addr) const;
  inline HeapWord*  block_align_up(HeapWord* addr) const;
  inline bool       is_block_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the location of the object after compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif  // #ifdef ASSERT
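
  // A sketch of the new-location arithmetic (how the pieces above are meant
  // to fit together; the authoritative code is in the .cpp file):
  //
  //   region    = addr_to_region_idx(addr)
  //   live_left = words of live data in addr's region that precede addr,
  //               found from the block table plus a bounded bitmap search
  //   new_addr  = region(region)->destination() + live_left
  //
  // so given the per-region destinations computed by the summary phase, a
  // pointer can be forwarded without touching any other region's metadata.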

private:
  bool initialize_block_data();
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif  // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  size_t          _reserved_byte_size;
  RegionData*     _region_data;
  size_t          _region_count;

  PSVirtualSpace* _block_vspace;
  BlockData*      _block_data;
  size_t          _block_count;
};

inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline bool
ParallelCompactData::RegionData::blocks_filled() const
{
  bool result = _blocks_filled;
  // Pairs with the release in set_blocks_filled() so that the block table
  // contents are visible before the flag is observed as true.
  OrderAccess::acquire();
  return result;
}

#ifdef ASSERT
inline size_t
ParallelCompactData::RegionData::blocks_filled_count() const
{
  return _blocks_filled_count;
}
#endif  // #ifdef ASSERT

inline void
ParallelCompactData::RegionData::set_blocks_filled()
{
  OrderAccess::release();
  _blocks_filled = true;
  // Debug builds count the number of times the table was filled.
  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  // The count occupies the high-order bits, so adding dc_mask is equivalent
  // to subtracting dc_one.
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region. Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}
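
// For contrast with the MT-unsafe variant above: claim() (below) uses a
// cmpxchg, so exactly one thread can move a region from available
// (destination count 0) to claimed. A minimal usage sketch (illustrative
// only; the real driver loop lives in the .cpp file):
//
//   ParallelCompactData& sd = PSParallelCompact::summary_data();
//   RegionData* const rd = sd.region(region_idx);
//   if (rd->claim()) {
//     // This thread now owns the region: fill it, then mark it done.
//     rd->set_completed();
//   }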

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big");  // This may be too strict.
  return region_to_addr(region) + offset;
}

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}

inline size_t
ParallelCompactData::block_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2BlockSize;
}

inline ParallelCompactData::BlockData*
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
{
  return block(addr_to_block_idx(addr));
}

inline HeapWord*
ParallelCompactData::block_to_addr(size_t block) const
{
  assert(block < _block_count, "block out of range");
  return _region_start + (block << Log2BlockSize);
}

inline size_t
ParallelCompactData::region_to_block_idx(size_t region) const
{
  return region << Log2BlocksPerRegion;
}

inline HeapWord*
ParallelCompactData::block_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & BlockAddrMask);
}

inline HeapWord*
ParallelCompactData::block_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return block_align_down(addr + BlockSizeOffsetMask);
}

inline bool
ParallelCompactData::is_block_aligned(HeapWord* addr) const
{
  return block_offset(addr) == 0;
}

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
// methods in subclasses should update the total as words are processed. Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0. The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object.
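//
// A minimal subclass sketch showing the protocol (a hypothetical class, for
// illustration only):
//
//   class CountLiveClosure: public ParMarkBitMapClosure {
//   public:
//     CountLiveClosure(ParMarkBitMap* bm, ParCompactionManager* cm, size_t n)
//       : ParMarkBitMapClosure(bm, cm, n) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       if (words > words_remaining()) return ParMarkBitMap::would_overflow;
//       decrement_words_remaining(words);
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };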

class ParMarkBitMapClosure: public StackObj {
public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

protected:
  inline void decrement_words_remaining(size_t words);

private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining;          // Words left to copy.

protected:
  HeapWord*                   _source;  // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation. The permanent generation is
// collected at the same time as the other two generations, but the permanent
// generation is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the processing of
// a klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
//   - marking phase
//   - summary phase
//   - compacting phase
//   - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//
//   - mark all the live objects
//   - calculate the destination of each object at the end of the collection
//   - move the objects to their destination
//   - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does
// a complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions, and each region has
// an associated entry (of type ParallelCompactData::RegionData) in the summary
// data. Each region is of a fixed size and typically will contain more than
// one object and may have parts of objects at the front and back of the
// region.
//
// region            -----+---------------------+----------
// objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking phase also computes the size of the live data covered by each
// region. This size includes the part of any live object spanning onto the
// region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live). The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap. The marking of the bit map is
// done atomically, as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantities such as
//
//   - the amount of live data at the beginning of the region from an object
//     entering the region
//   - the location of the first live data on the region
//   - a count of the number of regions receiving live data from XXX
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix is a
// portion at the beginning of the space that is not moved. The objects in the
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
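// For example (with made-up numbers): if the space bottom is B and the summary
// phase finds 10000 words of live data in the regions to the left of region
// XXX, then the live data in XXX is assigned the destination B + 10000. This
// applies to regions beyond the dense prefix; a region in the dense prefix
// keeps its own start address as its destination (see
// summarize_dense_prefix()).
//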
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. Such objects have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them.
// A region is ready to be filled if it is empty of live objects. Such a region
// may have been initially empty (only contained dead objects) or may have had
// all its live objects copied out already. A region that compacts into itself
// is also ready for filling. The ready list is initially filled with empty
// regions and regions compacting into themselves. There is always at least one
// region that can be put on the ready list. Regions are added to and removed
// from the ready list atomically.
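//
// A sketch of how the pieces declared below cooperate when a GC thread fills
// one region (the real loop is in the .cpp file):
//
//   1. claim the region via RegionData::claim() once its destination count
//      reaches 0;
//   2. fill_region() copies live words from one or more source regions, using
//      first_src_addr()/next_src_region() to walk the sources;
//   3. decrement_destination_counts() is called for the drained source
//      regions, possibly making them ready in turn;
//   4. the filled region is marked with RegionData::set_completed().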

class PSParallelCompact : AllStatic {
public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;
  typedef ParallelCompactData::BlockData BlockData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop p);
  };

  class AdjustPointerClosure: public ExtendedOopClosure {
  public:
    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };

  class AdjustKlassClosure : public KlassClosure {
  public:
    void do_klass(Klass* klass);
  };

  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class RefProcTaskProxy;

private:
  static STWGCTimer           _gc_timer;
  static ParallelOldTracer    _gc_tracer;
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc;  // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif  // #ifdef ASSERT

public:
  static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }

private:

  static void initialize_space_info();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact();
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction,
                            ParallelOldTracer *gc_tracer);

  // Compute the dense prefix for the designated space. This is an experimental
  // implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density. The mean and
  // standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);
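
  // That is, with mu = _dwl_mean and sigma = _dwl_std_dev (and _dwl_first_term
  // presumably the usual 1/(sigma*sqrt(2*pi)) normalization; see
  // initialize_dead_wood_limiter() in the .cpp file):
  //
  //   f(x) = _dwl_first_term * exp(-0.5 * ((x - mu) / sigma)^2)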

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) region in the range [beg, end) that has at
  // least dead_words of dead space to the left. The argument beg must be the
  // first region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region. Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);
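
  // Intuitively: moving the dense-prefix end one region to the right avoids
  // copying that region's live data but gives up reclaiming its dead space.
  // For example, a candidate region that is mostly dead reclaims much space
  // per word copied and scores high; one that is mostly live scores low,
  // since nearly all of its data would have to be copied (and its references
  // updated) for little reclaimed space.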

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary. Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots. Does not adjust addresses in heap.
  static void adjust_roots();

  DEBUG_ONLY(static void write_block_fill_histogram();)

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen. Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

public:

  PSParallelCompact();

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations. This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors
  static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
    return &_adjust_pointer_closure;
  }
  static KlassClosure* adjust_klass_closure()  { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);

  template <class T> static inline void adjust_pointer(T* p);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix. For each live object
  // in the region, update its interior references. For each
  // dead object, fill it with deadwood. Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end. None of the
  // objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so
  // dead space can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);
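
  // For example: when a task updates a range of dense-prefix regions, the
  // live objects starting within the range have their references updated in
  // place; dead gaps between them are overwritten with filler ("deadwood")
  // objects, and a dead gap at the end of the range is filled through to the
  // next live object even if that object starts beyond region_index_end.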

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of
  // the new region, and return the region index. Parameter end_addr is the
  // address one beyond the end of the source range just processed. If
  // necessary, switch to a new source space and set src_space_id (in-out
  // parameter) and src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))). If the destination
  // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }

  // Fill in the block table for the specified region.
  static void fill_blocks(size_t region_idx);

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap()         { return &_mark_bitmap; }
  static ParallelCompactData& summary_data()  { return _summary_data; }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  static STWGCTimer* gc_timer() { return &_gc_timer; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

  static void print_on_error(outputStream* st);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return
  // either full (if the closure is full) or incomplete. If the object will
  // not fit, return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination;  // Next addr to be written.
};
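
// A sketch of how a filling thread might drive this closure (illustrative
// only; the real driver is PSParallelCompact::fill_region() in the .cpp file):
//
//   MoveAndUpdateClosure closure(mark_bitmap(), cm, start_array, dest, words);
//   closure.set_source(first_src_addr(dest, src_space_id, src_region_idx));
//   closure.copy_partial_obj();         // any object tail comes first
//   while (!closure.is_full() /* and source regions remain */) {
//     mark_bitmap()->iterate(&closure, /* bit range of the source */ ...);
//   }
//   // would_overflow from do_addr() means the next object belongs at the
//   // start of the next destination region; a full closure means this
//   // destination region is filled.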

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

class FillClosure: public ParMarkBitMapClosure {
public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

private:
  ObjectStartArray* const _start_array;
};

#endif // SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_HPP