/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

class HeapRegion;
class HeapRegionSeq;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ScanHeapEvacClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentZFThread;

typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

enum G1GCThreadGroups {
  G1CRGroup = 0,
  G1ZFGroup = 1,
  G1CMGroup = 2,
  G1CLGroup = 3
};

enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

class YoungList : public CHeapObj {
private:
  G1CollectedHeap* _g1h;

  HeapRegion* _head;

  HeapRegion* _survivor_head;
  HeapRegion* _survivor_tail;

  HeapRegion* _curr;

  size_t _length;
  size_t _survivor_length;

  size_t _last_sampled_rs_lengths;
  size_t _sampled_rs_lengths;

  void empty_list(HeapRegion* list);

public:
  YoungList(G1CollectedHeap* g1h);

  void push_region(HeapRegion* hr);
  void add_survivor_region(HeapRegion* hr);

  void empty_list();
  bool is_empty() { return _length == 0; }
  size_t length() { return _length; }
  size_t survivor_length() { return _survivor_length; }

  void rs_length_sampling_init();
  bool rs_length_sampling_more();
  void rs_length_sampling_next();

  void reset_sampled_info() {
    _last_sampled_rs_lengths = 0;
  }
  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }

  // for development purposes
  void reset_auxilary_lists();
  void clear() { _head = NULL; _length = 0; }

  void clear_survivors() {
    _survivor_head   = NULL;
    _survivor_tail   = NULL;
    _survivor_length = 0;
  }

  HeapRegion* first_region() { return _head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* last_survivor_region() { return _survivor_tail; }

  // debugging
  bool check_list_well_formed();
  bool check_list_empty(bool check_sample = true);
  void print();
};

class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;

  // Closures used in implementation.
  friend class G1ParCopyHelper;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;

  // Other related classes.
  friend class G1MarkSweep;

private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;

  // Storage for the G1 heap (excludes the permanent generation).
  VirtualSpace _g1_storage;
  MemRegion    _g1_reserved;

  // The part of _g1_storage that is currently committed.
  MemRegion _g1_committed;

  // The maximum part of _g1_storage that has ever been committed.
  MemRegion _g1_max_committed;

  // The number of regions that are completely free.
  size_t _free_regions;

  // The number of regions we could create by expansion.
  size_t _expansion_regions;

  // Return the number of free regions in the heap (by direct counting.)
  size_t count_free_regions();
  // Return the number of free regions on the free and unclean lists.
  size_t count_free_regions_list();

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

  // Move all of the regions off the free lists, then rebuild those free
  // lists, before and after full GC.
  void tear_down_region_lists();
  void rebuild_region_lists();
  // This sets all non-empty regions to need zero-fill (which they will if
  // they are empty after full collection.)
  void set_used_regions_to_need_zero_fill();

  // The sequence of all heap regions in the heap.
  HeapRegionSeq* _hrs;

  // The region from which normal-sized objects are currently being
  // allocated. May be NULL.
  HeapRegion* _cur_alloc_region;

  // Postcondition: cur_alloc_region == NULL.
  void abandon_cur_alloc_region();
  void abandon_gc_alloc_regions();

  // The to-space memory regions into which objects are being copied during
  // a GC.
  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
  // These are the regions, one per GCAllocPurpose, that are half-full
  // at the end of a collection and that we want to reuse during the
  // next collection.
  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
  // This specifies whether we will keep the last half-full region at
  // the end of a collection so that it can be reused during the next
  // collection (this is specified per GCAllocPurpose).
  bool _retain_gc_alloc_region[GCAllocPurposeCount];

  // A list of the regions that have been set to be alloc regions in the
  // current collection.
  HeapRegion* _gc_alloc_region_list;

  // Determines PLAB size for a particular allocation purpose.
  static size_t desired_plab_sz(GCAllocPurpose purpose);

  // When called by a par thread, requires par_alloc_during_gc_lock() to be held.
  void push_gc_alloc_region(HeapRegion* hr);

  // This should only be called single-threaded.  Undeclares all GC alloc
  // regions.
  void forget_alloc_region_list();

  // Should be used to set an alloc region, because there's other
  // associated bookkeeping.
  void set_gc_alloc_region(int purpose, HeapRegion* r);

  // Check well-formedness of alloc region list.
  bool check_gc_alloc_regions();

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

  // This is used for a quick test on whether a reference points into
  // the collection set or not. Basically, we have an array, with one
  // byte per region, and that byte denotes whether the corresponding
  // region is in the collection set or not. The entry corresponding to
  // the bottom of the heap, i.e., region 0, is pointed to by
  // _in_cset_fast_test_base. The _in_cset_fast_test field has been
  // biased so that it actually points to address 0 of the address
  // space, to make the test as fast as possible (we can simply shift
  // the address to index into it, instead of having to subtract the
  // bottom of the heap from the address before shifting it; basically
  // it works in the same way the card table works).
  bool* _in_cset_fast_test;

  // The allocated array used for the fast test on whether a reference
  // points into the collection set or not. This field is also used to
  // free the array.
  bool* _in_cset_fast_test_base;

  // The length of the _in_cset_fast_test_base array.
  size_t _in_cset_fast_test_length;
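
  // Illustrative sketch only (the actual setup lives in the accompanying
  // .cpp file and may differ): the biased pointer can be derived from the
  // base array by subtracting the region index of the heap's bottom, e.g.:
  //
  //   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, max_regions());
  //   _in_cset_fast_test = _in_cset_fast_test_base -
  //       ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  //
  // With that bias in place, in_cset_fast_test() below can simply index
  //   _in_cset_fast_test[((uintx) obj) >> HeapRegion::LogOfHRGrainBytes]
  // without first subtracting the bottom of the heap.
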
  volatile unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Keeps track of how many "full collections" (i.e., Full GCs or
  // concurrent cycles) we have completed. The number of them we have
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

protected:

  // Returns "true" iff none of the gc alloc regions have any allocations
  // since the last call to "save_marks".
  bool all_alloc_regions_no_allocs_since_save_marks();
  // Perform finalization stuff on all allocation regions.
  void retire_all_alloc_regions();

  // The number of regions allocated to hold humongous objects.
  int        _num_humongous_regions;
  YoungList* _young_list;

  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // Parallel allocation lock to protect the current allocation region.
  Mutex  _par_alloc_during_gc_lock;
  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }

  // If possible/desirable, allocate a new HeapRegion for normal object
  // allocation sufficient for an allocation of the given "word_size".
  // If "do_expand" is true, will attempt to expand the heap if necessary
  // to satisfy the request. If "zero_filled" is true, requires a
  // zero-filled region.
  // (Returning NULL will trigger a GC.)
  virtual HeapRegion* newAllocRegion_work(size_t word_size,
                                          bool do_expand,
                                          bool zero_filled);

  virtual HeapRegion* newAllocRegion(size_t word_size,
                                     bool zero_filled = true) {
    return newAllocRegion_work(word_size, false, zero_filled);
  }
  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
                                                  size_t word_size,
                                                  bool zero_filled = true);

  // Attempt to allocate an object of the given (very large) "word_size".
  // Returns "NULL" on failure.
  virtual HeapWord* humongousObjAllocate(size_t word_size);

  // If possible, allocate a block of the given word_size, else return "NULL".
  // Returning NULL will trigger GC or heap expansion.
  // These two methods have rather awkward pre- and
  // post-conditions. If they are called outside a safepoint, then
  // they assume that the caller is holding the heap lock. Upon return
  // they release the heap lock, if they are returning a non-NULL
  // value. attempt_allocation_slow() also dirties the cards of a
  // newly-allocated young region after it releases the heap
  // lock. This change in interface was the neatest way to achieve
  // this card dirtying without affecting mem_allocate(), which is a
  // more frequently called method. We tried two or three different
  // approaches, but they were even more hacky.
  HeapWord* attempt_allocation(size_t word_size,
                               bool permit_collection_pause = true);

  HeapWord* attempt_allocation_slow(size_t word_size,
                                    bool permit_collection_pause = true);

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion* alloc_region,
                                    bool par,
                                    size_t word_size);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Retires an allocation region when it is full or at the end of a
  // GC pause.
  void retire_alloc_region(HeapRegion* alloc_region, bool par);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references are cleared
  //   during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  void do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size);

  // Attempt to expand the heap sufficiently
  // to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the address of the
  // allocated block, or else "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size);

public:
  // Expand the garbage-first heap by at least the given size (in bytes!).
  // (Rounds up to a HeapRegion boundary.)
  virtual void expand(size_t expand_bytes);

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    assert(r->in_collection_set(), "invariant");
    int index = r->hrs_index();
    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
    assert(!_in_cset_fast_test_base[index], "invariant");
    _in_cset_fast_test_base[index] = true;
  }

  // This is a fast test on whether a reference points into the
  // collection set or not. It does not assume that the reference
  // points into the heap; if it doesn't, it will return false.
  bool in_cset_fast_test(oop obj) {
    assert(_in_cset_fast_test != NULL, "sanity");
    if (_g1_committed.contains((HeapWord*) obj)) {
      // no need to subtract the bottom of the heap from obj,
      // _in_cset_fast_test is biased
      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
      bool ret = _in_cset_fast_test[index];
      // let's make sure the result is consistent with what the slower
      // test returns
      assert( ret || !obj_in_cs(obj), "sanity");
      assert(!ret ||  obj_in_cs(obj), "sanity");
      return ret;
    } else {
      return false;
    }
  }

  void clear_cset_fast_test() {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    memset(_in_cset_fast_test_base, false,
           _in_cset_fast_test_length * sizeof(bool));
  }

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of full collections completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The outer parameter is a boolean to help us do a bit tighter
  // consistency checking in the method. If outer is false, the caller
  // is the inner caller in the nesting (i.e., the Full GC). If outer
  // is true, the caller is the outer caller in this nesting (i.e.,
  // the concurrent cycle). Further nesting is not currently
  // supported. The end of this call also notifies the
  // FullGCCount_lock in case a Java thread is waiting for a full GC
  // to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_full_collections_completed(bool outer);

  unsigned int full_collections_completed() {
    return _full_collections_completed;
  }

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

  // Do an incremental collection: identify a collection set, and evacuate
  // its live objects elsewhere.
  virtual void do_collection_pause();

  // The guts of the incremental collection pause, executed by the vm
  // thread.
  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  virtual void evacuate_collection_set();

  // If this is an appropriate time, do a collection pause.
  // The "word_size" argument, if non-zero, indicates the size of an
  // allocation request that is prompting this query.
  void do_collection_pause_if_appropriate(size_t word_size);

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;
  // And its mod ref barrier set, used to track updates for the above.
  ModRefBarrierSet* _mr_bs;

  // A set of cards that cover the objects for which the Rsets should be updated
  // concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The Heap Region Rem Set Iterator.
  HeapRegionRemSetIterator** _rem_set_iterator;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // A function to check the consistency of dirty card logs.
  void check_ct_logs_at_safepoint();

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet _into_cset_dirty_card_queue_set;

  // After a collection pause, make the regions in the CS into free
  // regions.
  void free_collection_set(HeapRegion* cs_head);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(HeapRegion* cs_head);

  // Applies "scan_non_heap_roots" to roots outside the heap,
  // "scan_rs" to roots inside the heap (having done "set_region" to
  // indicate the region in which the root resides), and does "scan_perm"
  // (setting the generation to the perm generation.) If "scan_rs" is
  // NULL, then this step is skipped. The "worker_i"
  // param is for use with parallel roots processing, and should be
  // the "i" of the calling parallel worker thread's work(i) function.
  // In the sequential case this param will be ignored.
  void g1_process_strong_roots(bool collecting_perm_gen,
                               SharedHeap::ScanningOption so,
                               OopClosure* scan_non_heap_roots,
                               OopsInHeapRegionClosure* scan_rs,
                               OopsInGenClosure* scan_perm,
                               int worker_i);

  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void g1_process_weak_roots(OopClosure* root_closure,
                             OopClosure* non_root_closure);

  // Invoke "save_marks" on all heap regions.
  void save_marks();

  // Free a heap region.
  void free_region(HeapRegion* hr);
  // A component of "free_region", exposed for 'batching'.
  // All the params after "hr" are out params: the used bytes of the freed
  // region(s), the number of H regions cleared, the number of regions
  // freed, and pointers to the head and tail of a list of freed contig
  // regions, linked through the "next_on_unclean_list" field.
  void free_region_work(HeapRegion* hr,
                        size_t& pre_used,
                        size_t& cleared_h,
                        size_t& freed_regions,
                        UncleanRegionList* list,
                        bool par = false);


  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
  bool _mark_in_progress;

  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;

  // The concurrent zero-fill thread.
  ConcurrentZFThread* _czft;

  // The parallel task queues
  RefToScanQueueSet *_task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  // Set the attribute indicating whether evacuation has failed in the
  // current collection.
  void set_evacuation_failed(bool b) { _evacuation_failed = b; }

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves. Reset them.
  void remove_self_forwarding_pointers();

  // When one is non-null, so is the other. Together, each pair is
  // an object with a preserved mark, and its mark value.
  GrowableArray<oop>*     _objs_with_preserved_marks;
  GrowableArray<markOop>* _preserved_marks_of_objs;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  // The stack of evac-failure objects left to be scanned.
  GrowableArray<oop>* _evac_failure_scan_stack;
  // The closure to apply to evac-failure objects.
  OopsInHeapRegionClosure* _evac_failure_closure;
  // Set the field above.
  void
  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
    _evac_failure_closure = evac_failure_closure;
  }

  // Push "obj" on the scan stack.
  void push_on_evac_failure_scan_stack(oop obj);
  // Process scan stack entries until the stack is empty.
  void drain_evac_failure_scan_stack();
  // True iff an invocation of "drain_scan_stack" is in progress; to
  // prevent unnecessary recursion.
  bool _drain_in_progress;

  // Do any necessary initialization for evacuation-failure handling.
  // "cl" is the closure that will be used to process evac-failure
  // objects.
  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
  // Do any necessary cleanup for evacuation-failure handling data
  // structures.
  void finalize_for_evac_failure();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  void handle_evacuation_failure(oop obj);
  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
  void handle_evacuation_failure_common(oop obj, markOop m);


  // Ensure that the relevant gc_alloc regions are set.
  void get_gc_alloc_regions();
  // We're done with GC alloc regions. We are going to tear down the
  // gc alloc list and remove the gc alloc tag from all the regions on
  // that list. However, we will also retain the last (i.e., the one
  // that is half-full) GC alloc region, per GCAllocPurpose, for
  // possible reuse during the next collection, provided
  // _retain_gc_alloc_region[] indicates that it should be the
  // case. Said regions are kept in the _retained_gc_alloc_regions[]
  // array. If the parameter totally is set, we will not retain any
  // regions, irrespective of what _retain_gc_alloc_region[]
  // indicates.
  void release_gc_alloc_regions(bool totally);
#ifndef PRODUCT
  // Useful for debugging.
  void print_gc_alloc_regions();
#endif // !PRODUCT

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  enum G1H_process_strong_roots_tasks {
    G1H_PS_mark_stack_oops_do,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;

  // List of regions which require zero filling.
  UncleanRegionList _unclean_region_list;
  bool _unclean_regions_coming;

public:
  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue *task_queue(int i) const;

  // A set of cards where updates happened during the GC
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet& into_cset_dirty_card_queue_set()
        { return _into_cset_dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes, permanent generation, and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  void ref_processing_init();

  void set_par_threads(int t) {
    SharedHeap::set_par_threads(t);
    _process_strong_tasks->set_par_threads(t);
  }

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }

  // The current policy object for the collector.
  G1CollectorPolicy* g1_policy() const { return _g1_policy; }

  // Adaptive size policy. No such thing for g1.
  virtual AdaptiveSizePolicy* size_policy() { return NULL; }

  // The rem set and barrier set.
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // The rem set iterator.
  HeapRegionRemSetIterator* rem_set_iterator(int i) {
    return _rem_set_iterator[i];
  }

  HeapRegionRemSetIterator* rem_set_iterator() {
    return _rem_set_iterator[0];
  }

  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }

  void reset_gc_time_stamp() {
    _gc_time_stamp = 0;
    OrderAccess::fence();
  }

  void increment_gc_time_stamp() {
    ++_gc_time_stamp;
    OrderAccess::fence();
  }

  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                  DirtyCardQueue* into_cset_dcq,
                                  bool concurrent, int worker_i);

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessor
  ReferenceProcessor* ref_processor() { return _ref_processor; }

  // Reserved (g1 only; super method includes perm), capacity and the used
  // portion in bytes.
  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;
#ifndef PRODUCT
  size_t recalculate_used_regions() const;
#endif // PRODUCT

  // These virtual functions do the actual allocation.
  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool   is_noref,
                                 bool   is_tlab,
                                 bool*  gc_overhead_limit_was_exceeded);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc();

  virtual bool is_maximal_no_gc() const {
    return _g1_storage.uncommitted_size() == 0;
  }

  // The total number of regions in the heap.
  size_t n_regions();

  // The maximum number of regions in the heap.
  size_t max_regions();

  // The number of regions that are completely free.
  size_t free_regions();

  // The number of regions that are not completely free.
  size_t used_regions() { return n_regions() - free_regions(); }

  // True iff the ZF thread should run.
  bool should_zf();

  // The number of regions available for "regular" expansion.
  size_t expansion_regions() { return _expansion_regions; }

#ifndef PRODUCT
  bool regions_accounted_for();
  bool print_region_accounting_info();
  void print_region_counts();
#endif

  HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
  HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);

  void put_region_on_unclean_list(HeapRegion* r);
  void put_region_on_unclean_list_locked(HeapRegion* r);

  void prepend_region_list_on_unclean_list(UncleanRegionList* list);
  void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);

  void set_unclean_regions_coming(bool b);
  void set_unclean_regions_coming_locked(bool b);
  // Wait for cleanup to be complete.
  void wait_for_cleanup_complete();
  // Like above, but assumes that the calling thread owns the Heap_lock.
  void wait_for_cleanup_complete_locked();

  // Return the head of the unclean list.
  HeapRegion* peek_unclean_region_list_locked();
  // Remove and return the head of the unclean list.
  HeapRegion* pop_unclean_region_list_locked();

  // List of regions which are zero filled and ready for allocation.
  HeapRegion* _free_region_list;
  // Number of elements on the free list.
  size_t _free_region_list_size;

  // If the head of the unclean list is ZeroFilled, move it to the free
  // list.
  bool move_cleaned_region_to_free_list_locked();
  bool move_cleaned_region_to_free_list();

  void put_free_region_on_list_locked(HeapRegion* r);
  void put_free_region_on_list(HeapRegion* r);

  // Remove and return the head element of the free list.
  HeapRegion* pop_free_region_list_locked();

  // If "zero_filled" is true, we first try the free list, then we try the
  // unclean list, zero-filling the result. If "zero_filled" is false, we
  // first try the unclean list, then the zero-filled list.
  HeapRegion* alloc_free_region_from_lists(bool zero_filled);

  // Verify the integrity of the region lists.
  void remove_allocated_regions_from_lists();
  bool verify_region_lists();
  bool verify_region_lists_locked();
  size_t unclean_region_list_length();
  size_t free_region_list_length();

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  // Free a region if it is totally full of garbage. Returns the number of
  // bytes freed (0 ==> didn't free it).
  size_t free_region_if_totally_empty(HeapRegion *hr);
  void free_region_if_totally_empty_work(HeapRegion *hr,
                                         size_t& pre_used,
                                         size_t& cleared_h_regions,
                                         size_t& freed_regions,
                                         UncleanRegionList* list,
                                         bool par = false);

  // If we've done free region work that yields the given changes, update
  // the relevant global variables.
  void finish_free_region_work(size_t pre_used,
                               size_t cleared_h_regions,
                               size_t freed_regions,
                               UncleanRegionList* list);


  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  inline bool obj_in_cs(oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1 (excluding the permanent generation).
  bool is_in_g1_reserved(const void* p) const {
    return _g1_reserved.contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // committed in the heap.
  MemRegion g1_committed() {
    return _g1_committed;
  }

  NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;)

  // Dirty card table entries covering a list of young regions.
  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);

  // This resets the card table to all zeros. It is used after
  // a collection pause which used the card table to claim cards.
  void cleanUpCardTable();

  // Iteration functions.

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl) {
    oop_iterate(cl, true);
  }
  void oop_iterate(OopClosure* cl, bool do_perm);

  // Same as above, restricted to a memory region.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
    oop_iterate(mr, cl, true);
  }
  void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
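
  // Usage sketch (hypothetical closure, for illustration only): a caller
  // supplies an OopClosure and the heap applies it to every reference field.
  //
  //   class CountOopsClosure : public OopClosure {
  //     size_t _count;
  //   public:
  //     CountOopsClosure() : _count(0) { }
  //     virtual void do_oop(oop* p)       { _count++; }
  //     virtual void do_oop(narrowOop* p) { _count++; }
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountOopsClosure cl;
  //   G1CollectedHeap::heap()->oop_iterate(&cl);
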
  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  void object_iterate(ObjectClosure* cl, bool do_perm);

  // Iterate over all objects allocated since the last collection, calling
  // "cl.do_object" on each. The heap must have been initialized properly
  // to support this function, or else this call will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iterate over all spaces in use in the heap, in ascending address order.
  virtual void space_iterate(SpaceClosure* cl);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "doHeapRegion" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk);

  // Iterate over heap regions starting with r (or the first region if "r"
  // is NULL), in address order, terminating early if the "doHeapRegion"
  // method returns "true".
  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);

  // As above but starting from the region at index idx.
  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);

  HeapRegion* region_at(size_t idx);

  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4). Assumes that this will be called
  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that that claim value is
  // different from the claim_value of any heap region before the start of
  // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
  // attempting to claim the first region in each chunk, and, if
  // successful, applying the closure to each region in the chunk (and
  // setting the claim value of the second and subsequent regions of the
  // chunk.) For now requires that "doHeapRegion" always returns "false",
  // i.e., that a closure never attempts to abort a traversal.
  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                       int worker,
                                       jint claim_value);
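
  // Usage sketch (hypothetical closure and claim value, for illustration
  // only): each worker passes its own worker id but the same claim value.
  //
  //   class ScrubRegionClosure : public HeapRegionClosure {
  //   public:
  //     bool doHeapRegion(HeapRegion* r) {
  //       // per-region work goes here
  //       return false;  // never abort the traversal
  //     }
  //   };
  //
  //   ScrubRegionClosure cl;
  //   g1h->heap_region_par_iterate_chunked(&cl, worker_i, my_claim_value);
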
  // It resets all the region claim values to the default.
  void reset_heap_region_claim_values();

#ifdef ASSERT
  bool check_heap_region_claim_values(jint claim_value);
#endif // ASSERT

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);

  // As above but starting from region r.
  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);

  // Returns the first (lowest address) compactible space in the heap.
  virtual CompactibleSpace* first_compactible_space();

  // A CollectedHeap will contain some number of spaces. This finds the
  // space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

  // A G1CollectedHeap will contain some number of heap regions. This
  // finds the region containing a given address, or else returns NULL.
  HeapRegion* heap_region_containing(const void* addr) const;

  // Like the above, but requires "addr" to be in the heap (to avoid a
  // null-check), and unlike the above, may return a continuing humongous
  // region.
  HeapRegion* heap_region_containing_raw(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
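
  // Illustrative sketch only (not part of the interface, and assuming the
  // start of the committed space is the start of a block): the block
  // interface allows a linear walk such as
  //
  //   HeapWord* cur = g1h->g1_committed().start();   // g1h: hypothetical local
  //   while (cur < g1h->g1_committed().end()) {
  //     if (g1h->block_is_obj(cur)) {
  //       // cur is the start of a parseable object
  //     }
  //     cur += g1h->block_size(cur);
  //   }
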
  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t word_size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    // 6920090: Temporarily disabled, because of lingering
    // instabilities related to RICM with G1. In the
    // interim, the option ReduceInitialCardMarksForG1
    // below is left solely as a debugging device at least
    // until 6920109 fixes the instabilities.
    return ReduceInitialCardMarksForG1;
  }

  virtual bool card_mark_must_follow_store() const {
    return true;
  }

  bool is_in_young(oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    return hr != NULL && hr->is_young();
  }

  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects. Note that non-generational
  // G1 does not have any "young" objects, so it should not elide
  // the RS logging barrier and should always answer false below.
  // However, non-generational G1 (-XX:-G1Gen) appears to have
  // bit-rotted so was not tested below.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // Re 6920090, 6920109 above.
    assert(ReduceInitialCardMarksForG1, "Else cannot be here");
    assert(G1Gen || !is_in_young(new_obj),
           "Non-generational G1 should never return true below");
    return is_in_young(new_obj);
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // At least until perm gen collection is also G1-ified, at
    // which point this should return false.
    return true;
  }

  virtual bool allocs_are_zero_filled();

  // The boundary between a "large" and "small" array of primitives, in
  // words.
  virtual size_t large_typearray_limit();

  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }
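
  // Worked example (assuming the threshold is set to half a heap region,
  // which is how the accompanying implementation is expected to initialize
  // it): with 1M regions and 8-byte heap words a region holds 128K words,
  // so the threshold would be 64K words (512K bytes); a request of exactly
  // 64K words is still a regular allocation, while 64K + 1 words is treated
  // as humongous.
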
  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion. Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Print the maximum heap capacity.
  virtual size_t max_capacity() const;

  virtual jlong millis_since_last_gc();

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify();

  // Perform verification.

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool silent, bool use_prev_marking);

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty, bool silent);
  // Default behavior by calling print(tty);
  virtual void print() const;
  // This calls print_on(st, PrintHeapAtGCExtended).
  virtual void print_on(outputStream* st) const;
  // If extended is true, it will print out information for all
  // regions in the heap by calling print_on_extended(st).
  virtual void print_on(outputStream* st, bool extended) const;
  virtual void print_on_extended(outputStream* st) const;

  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;

  // Override
  void print_tracing_info() const;

  // If "addr" is a pointer into the (reserved?) heap, returns a positive
  // number indicating the "arena" within the heap in which "addr" falls.
  // Or else returns 0.
  virtual int addr_to_arena_id(void* addr) const;

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap();

  void empty_young_list();
  bool should_set_young_locked();

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  YoungList* young_list() { return _young_list; }

  // debugging
  bool check_young_list_well_formed() {
    return _young_list->check_list_well_formed();
  }

  bool check_young_list_empty(bool check_heap,
                              bool check_sample = true);

  // *** Stuff related to concurrent marking. It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  // This is called from the marksweep collector which then does
  // a concurrent mark and verifies that the results agree with
  // the stop the world marking.
  void checkConcurrentMark();
  void do_sync_mark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const bool use_prev_marking) const {
    if (use_prev_marking) {
      return is_obj_dead(obj, hr);
    } else {
      return is_obj_ill(obj, hr);
    }
  }

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.

  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
  }

  // This is used when copying an object to survivor space.
  // If the object is marked live, then we mark the copy live.
  // If the object has been allocated since the start of this mark
  // cycle, then we mark the copy live.
  // If the object has been around since the previous mark
  // phase, and hasn't been marked yet during this phase,
  // then we don't mark it, we just wait for the
  // current marking cycle to get to it.

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.

  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
  }

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Added: if it is in the permanent gen it isn't dead;
  // if it is NULL it isn't dead.

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  bool is_obj_dead_cond(const oop obj,
                        const bool use_prev_marking) {
    if (use_prev_marking) {
      return is_obj_dead(obj);
    } else {
      return is_obj_ill(obj);
    }
  }

  bool is_obj_dead(const oop obj) {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_dead(obj, hr);
  }

  bool is_obj_ill(const oop obj) {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_ill(obj, hr);
  }

  // The following is just to alert the verification code
  // that a full collection has occurred and that the
  // remembered sets are no longer up to date.
  bool _full_collection;
  void set_full_collection() { _full_collection = true; }
  void clear_full_collection() { _full_collection = false; }
  bool full_collection() { return _full_collection; }

  ConcurrentMark* concurrent_mark() const { return _cm; }
  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }

  // The dirty cards region list is used to record a subset of regions
  // whose cards need clearing. The list is populated during the
  // remembered set scanning and drained during the card table
  // cleanup. Although the methods are reentrant, population/draining
  // phases must not overlap. For synchronization purposes the last
  // element on the list points to itself.
  HeapRegion* _dirty_cards_region_list;
  void push_dirty_cards_region(HeapRegion* hr);
  HeapRegion* pop_dirty_cards_region();

public:
  void stop_conc_gc_threads();

  // <NEW PREDICTION>

  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
  void check_if_region_is_too_expensive(double predicted_time_ms);
  size_t pending_card_num();
  size_t max_pending_card_num();
  size_t cards_scanned();

  // </NEW PREDICTION>

protected:
  size_t _max_heap_capacity;

  // debug_only(static void check_for_valid_allocation_state();)

public:
  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
  // MemoryService). In productization, we can make this assert false
  // to catch such places (as well as searching for calls to this...)
  static void g1_unimplemented();

};

#define use_local_bitmaps         1
#define verify_local_bitmaps      0
#define oop_buffer_length         256

#ifndef PRODUCT
class GCLabBitMap;
class GCLabBitMapClosure: public BitMapClosure {
private:
  ConcurrentMark* _cm;
  GCLabBitMap*    _bitmap;

public:
  GCLabBitMapClosure(ConcurrentMark* cm,
                     GCLabBitMap* bitmap) {
    _cm     = cm;
    _bitmap = bitmap;
  }

  virtual bool do_bit(size_t offset);
};
#endif // !PRODUCT

class GCLabBitMap: public BitMap {
private:
  ConcurrentMark* _cm;

  int       _shifter;
  size_t    _bitmap_word_covers_words;

  // beginning of the heap
  HeapWord* _heap_start;

  // this is the actual start of the GCLab
  HeapWord* _real_start_word;

  // this is the actual end of the GCLab
  HeapWord* _real_end_word;

  // this is the first word, possibly located before the actual start
  // of the GCLab, that corresponds to the first bit of the bitmap
  HeapWord* _start_word;

  // size of a GCLab in words
  size_t _gclab_word_size;

  static int shifter() {
    return MinObjAlignment - 1;
  }

  // how many heap words does a single bitmap word correspond to?
  static size_t bitmap_word_covers_words() {
    return BitsPerWord << shifter();
  }

  size_t gclab_word_size() const {
    return _gclab_word_size;
  }

  // Calculates actual GCLab size in words
  size_t gclab_real_word_size() const {
    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
           / BitsPerWord;
  }

  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
    size_t bits_in_bitmap = gclab_word_size >> shifter();
    // We are going to ensure that the beginning of a word in this
    // bitmap also corresponds to the beginning of a word in the
    // global marking bitmap. To handle the case where a GCLab
    // starts from the middle of the bitmap, we need to add enough
    // space (i.e. up to a bitmap word) to ensure that we have
    // enough bits in the bitmap.
    return bits_in_bitmap + BitsPerWord - 1;
  }
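
  // Worked example (assuming 8-byte heap words and 8-byte object alignment,
  // so MinObjAlignment == 1, shifter() == 0 and BitsPerWord == 64): a
  // 4096-word GCLab needs 4096 bits, plus up to 63 bits of slack so that
  // _start_word can be rounded down to a boundary that lines up with a
  // word of the global marking bitmap.
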
public:
  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
    : BitMap(bitmap_size_in_bits(gclab_word_size)),
      _cm(G1CollectedHeap::heap()->concurrent_mark()),
      _shifter(shifter()),
      _bitmap_word_covers_words(bitmap_word_covers_words()),
      _heap_start(heap_start),
      _gclab_word_size(gclab_word_size),
      _real_start_word(NULL),
      _real_end_word(NULL),
      _start_word(NULL)
  {
    guarantee( size_in_words() >= bitmap_size_in_words(),
               "just making sure");
  }

  inline unsigned heapWordToOffset(HeapWord* addr) {
    unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
    assert(offset < size(), "offset should be within bounds");
    return offset;
  }

  inline HeapWord* offsetToHeapWord(size_t offset) {
    HeapWord* addr = _start_word + (offset << _shifter);
    assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
    return addr;
  }

  bool fields_well_formed() {
    bool ret1 = (_real_start_word == NULL) &&
                (_real_end_word == NULL) &&
                (_start_word == NULL);
    if (ret1)
      return true;

    bool ret2 = _real_start_word >= _start_word &&
                _start_word < _real_end_word &&
                (_real_start_word + _gclab_word_size) == _real_end_word &&
                (_start_word + _gclab_word_size + _bitmap_word_covers_words)
                                                              > _real_end_word;
    return ret2;
  }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (addr >= _real_start_word && addr < _real_end_word) {
      assert(!isMarked(addr), "should not have already been marked");

      // first mark it on the bitmap
      at_put(heapWordToOffset(addr), true);

      return true;
    } else {
      return false;
    }
  }

  inline bool isMarked(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    return at(heapWordToOffset(addr));
  }

  void set_buffer(HeapWord* start) {
    guarantee(use_local_bitmaps, "invariant");
    clear();

    assert(start != NULL, "invariant");
    _real_start_word = start;
    _real_end_word   = start + _gclab_word_size;

    size_t diff =
      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
    _start_word = start - diff;

    assert(fields_well_formed(), "invariant");
  }

#ifndef PRODUCT
  void verify() {
    // verify that the marks have been propagated
    GCLabBitMapClosure cl(_cm, this);
    iterate(&cl);
  }
#endif // PRODUCT

  void retire() {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (_start_word != NULL) {
      CMBitMap* mark_bitmap = _cm->nextMarkBitMap();

      // this means that the bitmap was set up for the GCLab
      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");

      mark_bitmap->mostly_disjoint_range_union(this,
                                0, // always start from the start of the bitmap
                                _start_word,
                                gclab_real_word_size());
      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));

#ifndef PRODUCT
      if (use_local_bitmaps && verify_local_bitmaps)
        verify();
#endif // PRODUCT
    } else {
      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
    }
  }
  bool fields_well_formed() {
    bool ret1 = (_real_start_word == NULL) &&
                (_real_end_word == NULL) &&
                (_start_word == NULL);
    if (ret1)
      return true;

    bool ret2 = _real_start_word >= _start_word &&
                _start_word < _real_end_word &&
                (_real_start_word + _gclab_word_size) == _real_end_word &&
                (_start_word + _gclab_word_size + _bitmap_word_covers_words)
                                                          > _real_end_word;
    return ret2;
  }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (addr >= _real_start_word && addr < _real_end_word) {
      assert(!isMarked(addr), "should not have already been marked");

      // first mark it on the bitmap
      at_put(heapWordToOffset(addr), true);

      return true;
    } else {
      return false;
    }
  }

  inline bool isMarked(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    return at(heapWordToOffset(addr));
  }

  void set_buffer(HeapWord* start) {
    guarantee(use_local_bitmaps, "invariant");
    clear();

    assert(start != NULL, "invariant");
    _real_start_word = start;
    _real_end_word   = start + _gclab_word_size;

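    // Align _start_word down so that bit 0 of this bitmap corresponds to
    // the start of a word in the global marking bitmap. Illustrative
    // example: if the buffer starts 25 heap words past _heap_start and
    // _bitmap_word_covers_words is 64, diff is 25 and _start_word ends up
    // 25 words before the buffer, on a 64-word boundary relative to
    // _heap_start.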
    size_t diff =
      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
    _start_word = start - diff;

    assert(fields_well_formed(), "invariant");
  }

#ifndef PRODUCT
  void verify() {
    // verify that the marks have been propagated
    GCLabBitMapClosure cl(_cm, this);
    iterate(&cl);
  }
#endif // PRODUCT

  void retire() {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (_start_word != NULL) {
      CMBitMap* mark_bitmap = _cm->nextMarkBitMap();

      // this means that the bitmap was set up for the GCLab
      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");

      mark_bitmap->mostly_disjoint_range_union(this,
                                0, // always start from the start of the bitmap
                                _start_word,
                                gclab_real_word_size());
      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));

#ifndef PRODUCT
      if (use_local_bitmaps && verify_local_bitmaps)
        verify();
#endif // PRODUCT
    } else {
      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
    }
  }

  size_t bitmap_size_in_words() const {
    return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
  }

};

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool        _retired;
  bool        _during_marking;
  GCLabBitMap _bitmap;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size) :
    ParGCAllocBuffer(gclab_word_size),
    _retired(false),
    _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
    _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size)
  { }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(_during_marking, "invariant");
    return _bitmap.mark(addr);
  }

  inline void set_buf(HeapWord* buf) {
    if (use_local_bitmaps && _during_marking)
      _bitmap.set_buffer(buf);
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  inline void retire(bool end_of_gc, bool retain) {
    if (_retired)
      return;
    if (use_local_bitmaps && _during_marking) {
      _bitmap.retire();
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};
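// Typical per-worker usage (illustrative sketch; see G1ParScanThreadState
// below): set_buf() points the PLAB at a freshly allocated GCLab,
// allocate() hands out chunks of it, mark() records objects copied while
// concurrent marking is in progress, and retire() flushes the local bitmap
// into the concurrent marking bitmap (graying the covered region if
// necessary) when the buffer is replaced or the pause ends.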
class G1ParScanThreadState : public StackObj {
protected:
  G1CollectedHeap*   _g1h;
  RefToScanQueue*    _refs;
  DirtyCardQueue     _dcq;
  CardTableModRefBS* _ct_bs;
  G1RemSet*          _g1_rem;

  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
  ageTable            _age_table;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  OopsInHeapRegionClosure*      _evac_failure_cl;
  G1ParScanHeapEvacClosure*     _evac_cl;
  G1ParScanPartialArrayClosure* _partial_scan_cl;

  int _hash_seed;
  int _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. The base is what we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // this points into the array, as we use the first few entries for padding
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  DirtyCardQueue&    dirty_card_queue() { return _dcq;   }
  CardTableModRefBS* ctbs()             { return _ct_bs; }

  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
    if (!from->is_survivor()) {
      _g1_rem->par_write_ref(from, p, tid);
    }
  }

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points to the same region or
    // is in the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);

  ~G1ParScanThreadState() {
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  }

  RefToScanQueue* refs()      { return _refs;       }
  ageTable*       age_table() { return &_age_table; }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
  size_t undo_waste() const         { return _undo_waste; }

  template <class T> void push_on_queue(T* ref) {
    assert(ref != NULL, "invariant");
    assert(has_partial_array_mask(ref) ||
           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
#ifdef ASSERT
    if (has_partial_array_mask(ref)) {
      oop p = clear_partial_array_mask(ref);
      // Verify that we point into the CS
      assert(_g1h->obj_in_cs(p), "Should be in CS");
    }
#endif
    refs()->push(ref);
  }

  void pop_from_queue(StarTask& ref) {
    if (refs()->pop_local(ref)) {
      assert((oop*)ref != NULL, "pop_local() returned true");
      assert(UseCompressedOops || !ref.is_narrow(), "Error");
      assert(has_partial_array_mask((oop*)ref) ||
             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
             "invariant");
    } else {
      StarTask null_task;
      ref = null_task;
    }
  }

  void pop_from_overflow_queue(StarTask& ref) {
    StarTask new_ref;
    refs()->pop_overflow(new_ref);
    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
    assert(has_partial_array_mask((oop*)new_ref) ||
           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
           "invariant");
    ref = new_ref;
  }

  int refs_to_scan()            { return refs()->size(); }
  int overflowed_refs_to_scan() { return refs()->overflow_stack()->length(); }

  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }

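  // GCLab refill policy (illustrative): allocate_slow() only retires the
  // current buffer and requests a fresh GCLab when the object is small
  // relative to the buffer, i.e. when
  //   word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct.
  // E.g., assuming a 1024-word GCLab and the default
  // ParallelGCBufferWastePct of 10, objects of up to about 102 words are
  // copied via a new GCLab; anything larger is allocated directly with
  // par_allocate_during_gc().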
  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {

    HeapWord* obj = NULL;
    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      assert(gclab_word_size == alloc_buf->word_sz(),
             "dynamic resizing is not supported");
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
      alloc_buf->retire(false, false);

      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise.
      alloc_buf->set_buf(buf);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
    if (alloc_buffer(purpose)->contains(obj)) {
      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }
  OopsInHeapRegionClosure* evac_failure_closure() {
    return _evac_failure_cl;
  }

  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
    _evac_cl = evac_cl;
  }

  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
    _partial_scan_cl = partial_scan_cl;
  }

  int* hash_seed() { return &_hash_seed; }
  int  queue_num() { return _queue_num; }

  size_t term_attempts() const { return _term_attempts; }
  void note_term_attempt()     { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }

  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void
    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;

  size_t* surviving_young_words() {
    // We add on to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

  void retire_alloc_buffers() {
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      size_t waste = _alloc_buffers[ap]->words_remaining();
      add_to_alloc_buffer_waste(waste);
      _alloc_buffers[ap]->retire(true, false);
    }
  }

private:
  template <class T> void deal_with_reference(T* ref_to_scan) {
    if (has_partial_array_mask(ref_to_scan)) {
      _partial_scan_cl->do_oop_nv(ref_to_scan);
    } else {
      // Note: we can use "raw" versions of "region_containing" because
      // "obj_to_scan" is definitely in the heap, and is not in a
      // humongous region.
      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
      _evac_cl->set_region(r);
      _evac_cl->do_oop_nv(ref_to_scan);
    }
  }

public:
  void trim_queue() {
    // The drain loop is duplicated: the first copy drains the overflow
    // queue, the second drains the task queue. This is better than
    // having a single loop, which checks both conditions and, inside
    // it, either pops the overflow queue or the task queue, as each
    // loop is tighter. Also, the decision to drain the overflow queue
    // first is not arbitrary, as the overflow queue is not visible
    // to the other workers, whereas the task queue is. So, we want to
    // drain the "invisible" entries first, while allowing the other
    // workers to potentially steal the "visible" entries.

    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
      while (overflowed_refs_to_scan() > 0) {
        StarTask ref_to_scan;
        assert((oop*)ref_to_scan == NULL, "Constructed above");
        pop_from_overflow_queue(ref_to_scan);
        // We shouldn't have pushed it on the queue if it was not
        // pointing into the CSet.
        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
        if (ref_to_scan.is_narrow()) {
          assert(UseCompressedOops, "Error");
          narrowOop* p = (narrowOop*)ref_to_scan;
          assert(!has_partial_array_mask(p) &&
                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
          deal_with_reference(p);
        } else {
          oop* p = (oop*)ref_to_scan;
          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
          deal_with_reference(p);
        }
      }

      while (refs_to_scan() > 0) {
        StarTask ref_to_scan;
        assert((oop*)ref_to_scan == NULL, "Constructed above");
        pop_from_queue(ref_to_scan);
        if ((oop*)ref_to_scan != NULL) {
          if (ref_to_scan.is_narrow()) {
            assert(UseCompressedOops, "Error");
            narrowOop* p = (narrowOop*)ref_to_scan;
            assert(!has_partial_array_mask(p) &&
                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
            deal_with_reference(p);
          } else {
            oop* p = (oop*)ref_to_scan;
            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
            deal_with_reference(p);
          }
        }
      }
    }
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP