/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_HPP
#define SHARE_VM_GC_G1_HEAPREGION_HPP

#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "utilities/macros.hpp"

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class G1CollectedHeap;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;
class nmethod;

#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
#define HR_FORMAT_PARAMS(_hr_) \
  (_hr_)->hrm_index(), \
  (_hr_)->get_short_type_str(), \
  p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())

// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)
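
// Illustrative use of the format macros above (hypothetical logging call
// site, not part of this header):
//
//   HeapRegion* hr = ...;
//   gclog_or_tty->print_cr("region " HR_FORMAT, HR_FORMAT_PARAMS(hr));
//
// which prints the region's index, short type string, and its
// bottom/top/end addresses, e.g. "region 12:(E)[0x...,0x...,0x...]".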

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public DirtyCardToOopClosure {
 private:
  HeapRegion* _hr;
  G1ParPushHeapRSClosure* _rs_scan;
  G1CollectedHeap* _g1;

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr,
                  G1ParPushHeapRSClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision);
};

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. We want to keep track of
// the highest address where it's safe to scan objects for each region.
// This is only relevant for current GC alloc regions so we keep a time stamp
// per region to determine if the region has been allocated during the current
// GC or not. If the time stamp is current we report a scan_top value which
// was saved at the end of the previous GC for retained alloc regions and which is
// equal to the bottom for all other regions.
// There is a race between card scanners and allocating gc workers where we must ensure
// that card scanners do not read the memory allocated by the gc workers.
// In order to enforce that, we must not return a value of _top which is more recent than the
// time stamp. This is due to the fact that a region may become a gc alloc region at
// some point after we've read the timestamp value as being < the current time stamp.
// The time stamps are re-initialized to zero at cleanup and at Full GCs.
// The current scheme that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
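
// A minimal sketch of the scan_top() decision implied by the comment above
// (an illustrative reading, not the authoritative implementation, which
// lives in heapRegion.cpp):
//
//   HeapWord* scan_top_sketch() const {
//     if (_gc_time_stamp < current_gc_time_stamp) {
//       return top();     // region untouched by this GC:
//                         // all of [bottom, top) is safe to scan
//     }
//     return _scan_top;   // (retained) GC alloc region: only scan up to
//                         // the limit saved when the stamp was recorded
//   }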

class G1OffsetTableContigSpace: public CompactibleSpace {
  friend class VMStructs;
  HeapWord* volatile _top;
  HeapWord* volatile _scan_top;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was, and this is what this field keeps track of.
  HeapWord* _pre_dummy_top;

 public:
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr);

  void set_top(HeapWord* value) { _top = value; }
  HeapWord* top() const { return _top; }

 protected:
  // Reset the G1OffsetTableContigSpace.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  HeapWord* volatile* top_addr() { return &_top; }
  // Try to allocate at least min_word_size and up to desired_word_size from this Space.
  // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
  // space allocated.
  // This version assumes that all allocation requests to this Space are properly
  // synchronized.
  inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
  // Try to allocate at least min_word_size and up to desired_word_size from this Space.
  // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
  // space allocated.
  // This version synchronizes with other calls to par_allocate_impl().
  inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);

 public:
  void reset_after_compaction() { set_top(compaction_top()); }

  size_t used() const { return byte_size(bottom(), top()); }
  size_t free() const { return byte_size(top(), end()); }
  bool is_free_block(const HeapWord* p) const { return p >= top(); }

  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  void object_iterate(ObjectClosure* blk);
  void safe_object_iterate(ObjectClosure* blk);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void mangle_unused_area() PRODUCT_RETURN;
  void mangle_unused_area_complete() PRODUCT_RETURN;

  HeapWord* scan_top() const;
  void record_timestamp();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }
  unsigned get_gc_time_stamp() { return _gc_time_stamp; }
  void record_retained_region();

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }
  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }

  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);

  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.reset_bot();
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};
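
// Illustrative use of the min/desired allocation API above (hypothetical
// caller, not part of this header). A request names the minimum size it can
// live with and the size it would prefer; NULL means even min_word_size did
// not fit, otherwise *actual_word_size reports what was handed out:
//
//   size_t actual_word_size = 0;
//   HeapWord* result = space->par_allocate(min_word_size,
//                                          desired_word_size,
//                                          &actual_word_size);
//   if (result != NULL) {
//     assert(min_word_size <= actual_word_size &&
//            actual_word_size <= desired_word_size, "contract");
//   }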

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 private:

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return HeapRegion::block_size(addr); // Avoid virtual call
  }

 protected:
  // The index of this region in the heap region sequence.
  uint _hrm_index;

  AllocationContext_t _allocation_context;

  HeapRegionType _type;

  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets, each
  // represented as linked lists through the field below. Currently, there
  // is only one set:
  //   The collection set.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
  HeapRegion* _prev;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // The calculated GC efficiency of the region.
  double _gc_efficiency;

  int  _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int  _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  HeapRegion(uint hrm_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr);

  // Initializing the HeapRegion not only resets the data structure, but also
  // resets the BOT for that heap region.
  // The default value for clear_space means that we will do the clearing if
  // there's clearing to be done ourselves. We also always mangle the space.
  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);

  static int    LogOfHRGrainBytes;
  static int    LogOfHRGrainWords;

  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;

  static size_t align_up_to_region_byte_size(size_t sz) {
    return (sz + (size_t) GrainBytes - 1) &
                                 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
  }
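
  // Worked example (illustrative): with 1M regions, GrainBytes == 1M and
  // LogOfHRGrainBytes == 20, so align_up_to_region_byte_size(1M + 1)
  // yields 2M while align_up_to_region_byte_size(2M) stays 2M. Note that
  // the mask relies on GrainBytes being exactly 1 << LogOfHRGrainBytes.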

  static size_t max_region_size();
  static size_t min_region_size_in_words();

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);

  // In a HeapRegion, all allocated blocks are occupied by objects.
  bool block_is_obj(const HeapWord* p) const;

  // Returns the object size for all valid block starts
  // and the amount of unallocated words if called on top()
  size_t block_size(const HeapWord* p) const;

  // Override for scan_and_forward support.
  void prepare_for_compaction(CompactPoint* cp);

  inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
  inline HeapWord* allocate_no_bot_updates(size_t word_size);
  inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);

  // If this region is a member of a HeapRegionManager, the index in that
  // sequence, otherwise G1_NO_HRM_INDEX ((uint) -1).
  uint hrm_index() const { return _hrm_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return
      (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    return used_at_mark_start_bytes - marked_bytes();
  }

  // Return the amount of bytes we'll reclaim if we collect this
  // region. This includes not only the known garbage bytes in the
  // region but also any unallocated space in it, i.e., [top, end),
  // since it will also be reclaimed if we collect the region.
  size_t reclaimable_bytes() {
    size_t known_live_bytes = live_bytes();
    assert(known_live_bytes <= capacity(), "sanity");
    return capacity() - known_live_bytes;
  }
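
  // Worked example of the accounting above (illustrative numbers only):
  // take an 8M region where prev_top_at_mark_start() - bottom() is 4M,
  // marked_bytes() is 1M, and top() - prev_top_at_mark_start() is 2M.
  // Then garbage_bytes() == 4M - 1M == 3M (a lower bound, since everything
  // allocated since the last marking is conservatively counted as live),
  // live_bytes() == 2M + 1M == 3M, and reclaimable_bytes() == 8M - 3M == 5M,
  // i.e. the 3M of known garbage plus the 2M of unallocated space [top, end).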

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  const char* get_type_str() const { return _type.get_str(); }
  const char* get_short_type_str() const { return _type.get_short_str(); }

  bool is_free() const { return _type.is_free(); }

  bool is_young()    const { return _type.is_young();    }
  bool is_eden()     const { return _type.is_eden();     }
  bool is_survivor() const { return _type.is_survivor(); }

  bool is_humongous() const { return _type.is_humongous(); }
  bool is_starts_humongous() const { return _type.is_starts_humongous(); }
  bool is_continues_humongous() const { return _type.is_continues_humongous(); }

  bool is_old() const { return _type.is_old(); }

  // A pinned region contains objects which are not moved by garbage collections.
  // Humongous regions and archive regions are pinned.
  bool is_pinned() const { return _type.is_pinned(); }

  // An archive region is a pinned region, also tagged as old, which
  // should not be marked during mark/sweep. This allows the address
  // space to be shared by JVM instances.
  bool is_archive() const { return _type.is_archive(); }

  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object.
  //
  // obj_top : points to the end of the humongous object that's being
  // allocated.
  void set_starts_humongous(HeapWord* obj_top);

  // Makes the current region be a "continues humongous"
  // region. first_hr is the "start humongous" region of the series
  // which this region will be part of.
  void set_continues_humongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void clear_humongous();

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  inline bool in_collection_set() const;

  inline HeapRegion* next_in_collection_set() const;
  inline void set_next_in_collection_set(HeapRegion* r);

  void set_allocation_context(AllocationContext_t context) {
    _allocation_context = context;
  }

  AllocationContext_t allocation_context() const {
    return _allocation_context;
  }

  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next and prev fields used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }
  HeapRegion* prev()              { return _prev; }

  void set_next(HeapRegion* next) { _next = next; }
  void set_prev(HeapRegion* prev) { _prev = prev; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           "containing_set: " PTR_FORMAT " "
           "_containing_set: " PTR_FORMAT,
           p2i(containing_set), p2i(_containing_set));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT
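
  // Illustrative (assumed) usage of set_containing_set(): the assert above
  // enforces that a region is detached from its current set before it can
  // join another, so moving a region between sets is always two calls:
  //
  //   hr->set_containing_set(NULL);     // leave the old set first
  //   hr->set_containing_set(new_set);  // then join the new one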

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space, bool locked = false);
  void par_clear();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.

  // Notify the region that concurrent marking is starting. Initialize
  // all fields related to the next marking info.
  inline void note_start_of_marking();

  // Notify the region that concurrent marking has finished. Copy the
  // (now finalized) next marking info fields into the prev marking
  // info fields.
  inline void note_end_of_marking();

  // Notify the region that it will be used as to-space during a GC
  // and we are about to start copying objects into it.
  inline void note_start_of_copying(bool during_initial_mark);

  // Notify the region that it ceases being to-space during a GC and
  // we will not copy objects into it any more.
  inline void note_end_of_copying(bool during_initial_mark);

  // Notify the region that we are about to start processing
  // self-forwarded objects during evac failure handling.
  void note_self_forwarding_removal_start(bool during_initial_mark,
                                          bool during_conc_mark);

  // Notify the region that we have finished processing self-forwarded
  // objects during evac failure handling.
  void note_self_forwarding_removal_end(bool during_initial_mark,
                                        bool during_conc_mark,
                                        size_t marked_bytes);
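
  // A sketch of the marking lifecycle implied by the notifications above
  // (assumed sequencing, for illustration only):
  //
  //   hr->note_start_of_marking(); // next_TAMS := top, next marking info reset
  //   ... concurrent marking runs; add_to_marked_bytes() accumulates ...
  //   hr->note_end_of_marking();   // prev_TAMS := next_TAMS,
  //                                // prev marked bytes := next marked bytes
  //
  // Afterwards, objects at or above prev_top_at_mark_start() are exactly
  // those obj_allocated_since_prev_marking() reports as allocated since then.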

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  void reset_during_compaction() {
    zero_marked_bytes();
    init_top_at_mark_start();
  }

  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_free() { _type.set_free(); }

  void set_eden()        { _type.set_eden();        }
  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
  void set_survivor()    { _type.set_survivor();    }

  void set_old() { _type.set_old(); }

  void set_archive() { _type.set_archive(); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      _next_marked_bytes = 0;
    }
  }
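
  // Illustrative liveness test built on the predicates above (hypothetical
  // caller and bitmap name, not part of this header): for the "prev"
  // marking, an object is considered live if it was allocated since that
  // marking completed, or is marked in the corresponding bitmap:
  //
  //   bool is_live = hr->obj_allocated_since_prev_marking(obj) ||
  //                  prev_bitmap->isMarked((HeapWord*) obj);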

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // filter_young: if true and the region is a young region then we
  // skip the iteration.
  // card_ptr: if not NULL, and we decide that the card is not young
  // and we iterate over it, we'll clean the card before we start the
  // iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young,
                                   jbyte* card_ptr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

  virtual CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  // Routines for managing a list of code roots (attached to this
  // region's RSet) that point into this heap region.
  void add_strong_code_root(nmethod* nm);
  void add_strong_code_root_locked(nmethod* nm);
  void remove_strong_code_root(nmethod* nm);

  // Applies blk->do_code_blob() to each of the entries in
  // the strong code roots list for this region
  void strong_code_roots_do(CodeBlobClosure* blk) const;

  // Verify that the entries on the strong code root list for this
  // region are live and include at least one pointer into this region.
  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify() const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionManager;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};

#endif // SHARE_VM_GC_G1_HEAPREGION_HPP