1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1BarrierSet.hpp" 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 28 #include "gc/g1/g1CardTable.inline.hpp" 29 #include "gc/g1/g1CardTableEntryClosure.hpp" 30 #include "gc/g1/g1CollectedHeap.inline.hpp" 31 #include "gc/g1/g1ConcurrentRefine.hpp" 32 #include "gc/g1/g1DirtyCardQueue.hpp" 33 #include "gc/g1/g1FromCardCache.hpp" 34 #include "gc/g1/g1GCPhaseTimes.hpp" 35 #include "gc/g1/g1HotCardCache.hpp" 36 #include "gc/g1/g1OopClosures.inline.hpp" 37 #include "gc/g1/g1RootClosures.hpp" 38 #include "gc/g1/g1RemSet.hpp" 39 #include "gc/g1/g1SharedDirtyCardQueue.hpp" 40 #include "gc/g1/heapRegion.inline.hpp" 41 #include "gc/g1/heapRegionManager.inline.hpp" 42 #include "gc/g1/heapRegionRemSet.inline.hpp" 43 #include "gc/g1/sparsePRT.hpp" 44 #include "gc/shared/gcTraceTime.inline.hpp" 45 #include "gc/shared/ptrQueue.hpp" 46 #include "gc/shared/suspendibleThreadSet.hpp" 47 #include "jfr/jfrEvents.hpp" 48 #include "memory/iterator.hpp" 49 #include "memory/resourceArea.hpp" 50 #include "oops/access.inline.hpp" 51 #include "oops/oop.inline.hpp" 52 #include "runtime/os.hpp" 53 #include "utilities/align.hpp" 54 #include "utilities/globalDefinitions.hpp" 55 #include "utilities/stack.inline.hpp" 56 #include "utilities/ticks.hpp" 57 58 // Collects information about the overall heap root scan progress during an evacuation. 59 // 60 // Scanning the remembered sets works by first merging all sources of cards to be 61 // scanned (log buffers, hcc, remembered sets) into a single data structure to remove 62 // duplicates and simplify work distribution. 63 // 64 // During the following card scanning we not only scan this combined set of cards, but 65 // also remember that these were completely scanned. The following evacuation passes 66 // do not scan these cards again, and so need to be preserved across increments. 
67 // 68 // The representation for all the cards to scan is the card table: cards can have 69 // one of three states during GC: 70 // - clean: these cards will not be scanned in this pass 71 // - dirty: these cards will be scanned in this pass 72 // - scanned: these cards have already been scanned in a previous pass 73 // 74 // After all evacuation is done, we reset the card table to clean. 75 // 76 // Work distribution occurs on "chunk" basis, i.e. contiguous ranges of cards. As an 77 // additional optimization, during card merging we remember which regions and which 78 // chunks actually contain cards to be scanned. Threads iterate only across these 79 // regions, and only compete for chunks containing any cards. 80 // 81 // Within these chunks, a worker scans the card table on "blocks" of cards, i.e. 82 // contiguous ranges of dirty cards to be scanned. These blocks are converted to actual 83 // memory ranges and then passed on to actual scanning. 84 class G1RemSetScanState : public CHeapObj<mtGC> { 85 class G1DirtyRegions; 86 87 size_t _max_regions; 88 89 // Has this region that is part of the regions in the collection set been processed yet. 90 typedef bool G1RemsetIterState; 91 92 G1RemsetIterState volatile* _collection_set_iter_state; 93 94 // Card table iteration claim for each heap region, from 0 (completely unscanned) 95 // to (>=) HeapRegion::CardsPerRegion (completely scanned). 96 uint volatile* _card_table_scan_state; 97 98 // Return "optimal" number of chunks per region we want to use for claiming areas 99 // within a region to claim. Dependent on the region size as proxy for the heap 100 // size, we limit the total number of chunks to limit memory usage and maintenance 101 // effort of that table vs. granularity of distributing scanning work. 102 // Testing showed that 8 for 1M/2M region, 16 for 4M/8M regions, 32 for 16/32M regions 103 // seems to be such a good trade-off. 
104 static uint get_chunks_per_region(uint log_region_size) { 105 // Limit the expected input values to current known possible values of the 106 // (log) region size. Adjust as necessary after testing if changing the permissible 107 // values for region size. 108 assert(log_region_size >= 20 && log_region_size <= 25, 109 "expected value in [20,25], but got %u", log_region_size); 110 return 1u << (log_region_size / 2 - 7); 111 } 112 113 uint _scan_chunks_per_region; // Number of chunks per region. 114 uint8_t _log_scan_chunks_per_region; // Log of number of chunks per region. 115 bool* _region_scan_chunks; 116 size_t _num_total_scan_chunks; // Total number of elements in _region_scan_chunks. 117 uint8_t _scan_chunks_shift; // For conversion between card index and chunk index. 118 public: 119 uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; } 120 121 // Returns whether the chunk corresponding to the given region/card in region contain a 122 // dirty card, i.e. actually needs scanning. 123 bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const { 124 size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift); 125 assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT, 126 idx, _num_total_scan_chunks); 127 return _region_scan_chunks[idx]; 128 } 129 130 private: 131 // The complete set of regions which card table needs to be cleared at the end of GC because 132 // we scribbled all over them. 133 G1DirtyRegions* _all_dirty_regions; 134 // The set of regions which card table needs to be scanned for new dirty cards 135 // in the current evacuation pass. 136 G1DirtyRegions* _next_dirty_regions; 137 138 // Set of (unique) regions that can be added to concurrently. 
139 class G1DirtyRegions : public CHeapObj<mtGC> { 140 uint* _buffer; 141 uint _cur_idx; 142 size_t _max_regions; 143 144 bool* _contains; 145 146 public: 147 G1DirtyRegions(size_t max_regions) : 148 _buffer(NEW_C_HEAP_ARRAY(uint, max_regions, mtGC)), 149 _cur_idx(0), 150 _max_regions(max_regions), 151 _contains(NEW_C_HEAP_ARRAY(bool, max_regions, mtGC)) { 152 153 reset(); 154 } 155 156 static size_t chunk_size() { return M; } 157 158 ~G1DirtyRegions() { 159 FREE_C_HEAP_ARRAY(uint, _buffer); 160 FREE_C_HEAP_ARRAY(bool, _contains); 161 } 162 163 void reset() { 164 _cur_idx = 0; 165 ::memset(_contains, false, _max_regions * sizeof(bool)); 166 } 167 168 uint size() const { return _cur_idx; } 169 170 uint at(uint idx) const { 171 assert(idx < _cur_idx, "Index %u beyond valid regions", idx); 172 return _buffer[idx]; 173 } 174 175 void add_dirty_region(uint region) { 176 if (_contains[region]) { 177 return; 178 } 179 180 bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false; 181 if (marked_as_dirty) { 182 uint allocated = Atomic::add(1u, &_cur_idx) - 1; 183 _buffer[allocated] = region; 184 } 185 } 186 187 // Creates the union of this and the other G1DirtyRegions. 188 void merge(const G1DirtyRegions* other) { 189 for (uint i = 0; i < other->size(); i++) { 190 uint region = other->at(i); 191 if (!_contains[region]) { 192 _buffer[_cur_idx++] = region; 193 _contains[region] = true; 194 } 195 } 196 } 197 }; 198 199 // Creates a snapshot of the current _top values at the start of collection to 200 // filter out card marks that we do not want to scan. 
201 class G1ResetScanTopClosure : public HeapRegionClosure { 202 G1RemSetScanState* _scan_state; 203 204 public: 205 G1ResetScanTopClosure(G1RemSetScanState* scan_state) : _scan_state(scan_state) { } 206 207 virtual bool do_heap_region(HeapRegion* r) { 208 uint hrm_index = r->hrm_index(); 209 if (r->in_collection_set()) { 210 // Young regions had their card table marked as young at their allocation; 211 // we need to make sure that these marks are cleared at the end of GC, *but* 212 // they should not be scanned for cards. 213 // So directly add them to the "all_dirty_regions". 214 // Same for regions in the (initial) collection set: they may contain cards from 215 // the log buffers, make sure they are cleaned. 216 _scan_state->add_all_dirty_region(hrm_index); 217 } else if (r->is_old_or_humongous_or_archive()) { 218 _scan_state->set_scan_top(hrm_index, r->top()); 219 } 220 return false; 221 } 222 }; 223 // For each region, contains the maximum top() value to be used during this garbage 224 // collection. Subsumes common checks like filtering out everything but old and 225 // humongous regions outside the collection set. 226 // This is valid because we are not interested in scanning stray remembered set 227 // entries from free or archive regions. 
228 HeapWord** _scan_top; 229 230 class G1ClearCardTableTask : public AbstractGangTask { 231 G1CollectedHeap* _g1h; 232 G1DirtyRegions* _regions; 233 uint _chunk_length; 234 235 uint volatile _cur_dirty_regions; 236 237 G1RemSetScanState* _scan_state; 238 239 public: 240 G1ClearCardTableTask(G1CollectedHeap* g1h, 241 G1DirtyRegions* regions, 242 uint chunk_length, 243 G1RemSetScanState* scan_state) : 244 AbstractGangTask("G1 Clear Card Table Task"), 245 _g1h(g1h), 246 _regions(regions), 247 _chunk_length(chunk_length), 248 _cur_dirty_regions(0), 249 _scan_state(scan_state) { 250 251 assert(chunk_length > 0, "must be"); 252 } 253 254 static uint chunk_size() { return M; } 255 256 void work(uint worker_id) { 257 while (_cur_dirty_regions < _regions->size()) { 258 uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length; 259 uint max = MIN2(next + _chunk_length, _regions->size()); 260 261 for (uint i = next; i < max; i++) { 262 HeapRegion* r = _g1h->region_at(_regions->at(i)); 263 if (!r->is_survivor()) { 264 r->clear_cardtable(); 265 } 266 } 267 } 268 } 269 }; 270 271 // Clear the card table of "dirty" regions. 272 void clear_card_table(WorkGang* workers) { 273 uint num_regions = _all_dirty_regions->size(); 274 275 if (num_regions == 0) { 276 return; 277 } 278 279 uint const num_chunks = (uint)(align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size()); 280 uint const num_workers = MIN2(num_chunks, workers->active_workers()); 281 uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion; 282 283 // Iterate over the dirty cards region list. 
284 G1ClearCardTableTask cl(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this); 285 286 log_debug(gc, ergo)("Running %s using %u workers for %u " 287 "units of work for %u regions.", 288 cl.name(), num_workers, num_chunks, num_regions); 289 workers->run_task(&cl, num_workers); 290 291 #ifndef PRODUCT 292 G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup(); 293 #endif 294 } 295 296 public: 297 G1RemSetScanState() : 298 _max_regions(0), 299 _collection_set_iter_state(NULL), 300 _card_table_scan_state(NULL), 301 _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)), 302 _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)), 303 _region_scan_chunks(NULL), 304 _num_total_scan_chunks(0), 305 _scan_chunks_shift(0), 306 _all_dirty_regions(NULL), 307 _next_dirty_regions(NULL), 308 _scan_top(NULL) { 309 } 310 311 ~G1RemSetScanState() { 312 FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state); 313 FREE_C_HEAP_ARRAY(uint, _card_table_scan_state); 314 FREE_C_HEAP_ARRAY(bool, _region_scan_chunks); 315 FREE_C_HEAP_ARRAY(HeapWord*, _scan_top); 316 } 317 318 void initialize(size_t max_regions) { 319 assert(_collection_set_iter_state == NULL, "Must not be initialized twice"); 320 _max_regions = max_regions; 321 _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC); 322 _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC); 323 _num_total_scan_chunks = max_regions * _scan_chunks_per_region; 324 _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC); 325 326 _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region); 327 _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC); 328 } 329 330 void prepare() { 331 for (size_t i = 0; i < _max_regions; i++) { 332 _collection_set_iter_state[i] = false; 333 clear_scan_top((uint)i); 334 } 335 336 _all_dirty_regions = new G1DirtyRegions(_max_regions); 337 
_next_dirty_regions = new G1DirtyRegions(_max_regions); 338 339 G1ResetScanTopClosure cl(this); 340 G1CollectedHeap::heap()->heap_region_iterate(&cl); 341 } 342 343 void prepare_for_merge_heap_roots() { 344 _all_dirty_regions->merge(_next_dirty_regions); 345 346 _next_dirty_regions->reset(); 347 for (size_t i = 0; i < _max_regions; i++) { 348 _card_table_scan_state[i] = 0; 349 } 350 351 ::memset(_region_scan_chunks, false, _num_total_scan_chunks * sizeof(*_region_scan_chunks)); 352 } 353 354 // Returns whether the given region contains cards we need to scan. The remembered 355 // set and other sources may contain cards that 356 // - are in uncommitted regions 357 // - are located in the collection set 358 // - are located in free regions 359 // as we do not clean up remembered sets before merging heap roots. 360 bool contains_cards_to_process(uint const region_idx) const { 361 HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx); 362 return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive()); 363 } 364 365 size_t num_visited_cards() const { 366 size_t result = 0; 367 for (uint i = 0; i < _num_total_scan_chunks; i++) { 368 if (_region_scan_chunks[i]) { 369 result++; 370 } 371 } 372 return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region); 373 } 374 375 size_t num_cards_in_dirty_regions() const { 376 return _next_dirty_regions->size() * HeapRegion::CardsPerRegion; 377 } 378 379 void set_chunk_region_dirty(size_t const region_card_idx) { 380 size_t chunk_idx = region_card_idx >> _scan_chunks_shift; 381 for (uint i = 0; i < _scan_chunks_per_region; i++) { 382 _region_scan_chunks[chunk_idx++] = true; 383 } 384 } 385 386 void set_chunk_dirty(size_t const card_idx) { 387 assert((card_idx >> _scan_chunks_shift) < _num_total_scan_chunks, 388 "Trying to access index " SIZE_FORMAT " out of bounds " SIZE_FORMAT, 389 card_idx >> _scan_chunks_shift, _num_total_scan_chunks); 390 size_t const chunk_idx = card_idx >> 
_scan_chunks_shift; 391 if (!_region_scan_chunks[chunk_idx]) { 392 _region_scan_chunks[chunk_idx] = true; 393 } 394 } 395 396 void cleanup(WorkGang* workers) { 397 _all_dirty_regions->merge(_next_dirty_regions); 398 399 clear_card_table(workers); 400 401 delete _all_dirty_regions; 402 _all_dirty_regions = NULL; 403 404 delete _next_dirty_regions; 405 _next_dirty_regions = NULL; 406 } 407 408 void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) { 409 uint num_regions = _next_dirty_regions->size(); 410 411 if (num_regions == 0) { 412 return; 413 } 414 415 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 416 417 WorkGang* workers = g1h->workers(); 418 uint const max_workers = workers->active_workers(); 419 420 uint const start_pos = num_regions * worker_id / max_workers; 421 uint cur = start_pos; 422 423 do { 424 bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur))); 425 guarantee(!result, "Not allowed to ask for early termination."); 426 cur++; 427 if (cur == _next_dirty_regions->size()) { 428 cur = 0; 429 } 430 } while (cur != start_pos); 431 } 432 433 // Attempt to claim the given region in the collection set for iteration. Returns true 434 // if this call caused the transition from Unclaimed to Claimed. 
435 inline bool claim_collection_set_region(uint region) { 436 assert(region < _max_regions, "Tried to access invalid region %u", region); 437 if (_collection_set_iter_state[region]) { 438 return false; 439 } 440 return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false); 441 } 442 443 bool has_cards_to_scan(uint region) { 444 assert(region < _max_regions, "Tried to access invalid region %u", region); 445 return _card_table_scan_state[region] < HeapRegion::CardsPerRegion; 446 } 447 448 uint claim_cards_to_scan(uint region, uint increment) { 449 assert(region < _max_regions, "Tried to access invalid region %u", region); 450 return Atomic::add(increment, &_card_table_scan_state[region]) - increment; 451 } 452 453 void add_dirty_region(uint const region) { 454 #ifdef ASSERT 455 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region); 456 assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(), 457 "Region %u is not suitable for scanning, is %sin collection set or %s", 458 hr->hrm_index(), hr->in_collection_set() ? 
"" : "not ", hr->get_short_type_str()); 459 #endif 460 _next_dirty_regions->add_dirty_region(region); 461 } 462 463 void add_all_dirty_region(uint region) { 464 #ifdef ASSERT 465 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region); 466 assert(hr->in_collection_set(), 467 "Only add young regions to all dirty regions directly but %u is %s", 468 hr->hrm_index(), hr->get_short_type_str()); 469 #endif 470 _all_dirty_regions->add_dirty_region(region); 471 } 472 473 void set_scan_top(uint region_idx, HeapWord* value) { 474 _scan_top[region_idx] = value; 475 } 476 477 HeapWord* scan_top(uint region_idx) const { 478 return _scan_top[region_idx]; 479 } 480 481 void clear_scan_top(uint region_idx) { 482 set_scan_top(region_idx, NULL); 483 } 484 }; 485 486 G1RemSet::G1RemSet(G1CollectedHeap* g1h, 487 G1CardTable* ct, 488 G1HotCardCache* hot_card_cache) : 489 _scan_state(new G1RemSetScanState()), 490 _prev_period_summary(), 491 _g1h(g1h), 492 _num_conc_refined_cards(0), 493 _ct(ct), 494 _g1p(_g1h->policy()), 495 _hot_card_cache(hot_card_cache) { 496 } 497 498 G1RemSet::~G1RemSet() { 499 delete _scan_state; 500 } 501 502 uint G1RemSet::num_par_rem_sets() { 503 return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads); 504 } 505 506 void G1RemSet::initialize(size_t capacity, uint max_regions) { 507 G1FromCardCache::initialize(num_par_rem_sets(), max_regions); 508 _scan_state->initialize(max_regions); 509 } 510 511 // Helper class to scan and detect ranges of cards that need to be scanned on the 512 // card table. 
class G1CardTableScanner : public StackObj {
public:
  typedef CardTable::CardValue CardValue;

private:
  CardValue* const _base_addr;      // First card of the range being scanned.

  CardValue* _cur_addr;             // Current scan position.
  CardValue* const _end_addr;       // One past the last card of the range.

  static const size_t ToScanMask = G1CardTable::g1_card_already_scanned;
  static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned;

  bool cur_addr_aligned() const {
    return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0;
  }

  // A card is "dirty" (to scan) if its already-scanned bit is clear.
  bool cur_card_is_dirty() const {
    CardValue value = *_cur_addr;
    return (value & ToScanMask) == 0;
  }

  // Inspect sizeof(size_t) cards at once by loading them as a single word.
  bool cur_word_of_cards_contains_any_dirty_card() const {
    assert(cur_addr_aligned(), "Current address should be aligned");
    size_t const value = *(size_t*)_cur_addr;
    return (~value & ExpandedToScanMask) != 0;
  }

  bool cur_word_of_cards_all_dirty_cards() const {
    size_t const value = *(size_t*)_cur_addr;
    return value == G1CardTable::WordAllDirty;
  }

  // Advance one card and return the (zero-based) index of the card we were at.
  size_t get_and_advance_pos() {
    _cur_addr++;
    return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1;
  }

public:
  G1CardTableScanner(CardValue* start_card, size_t size) :
    _base_addr(start_card),
    _cur_addr(start_card),
    _end_addr(start_card + size) {

    assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card));
    assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size);
  }

  // Returns the index of the next dirty card at or after the current position,
  // or the range size if there is none. Scans card-by-card until word-aligned,
  // then a word of cards at a time.
  size_t find_next_dirty() {
    while (!cur_addr_aligned()) {
      if (cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (cur_word_of_cards_contains_any_dirty_card()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }

  // Returns the index of the next non-dirty card at or after the current position,
  // i.e. the exclusive end of the current run of dirty cards.
  size_t find_next_non_dirty() {
    assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area.");

    while (!cur_addr_aligned()) {
      if (!cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (!cur_word_of_cards_all_dirty_cards()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (!cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a non-dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }
};

// Helper class to claim dirty chunks within the card table.
class G1CardTableChunkClaimer {
  G1RemSetScanState* _scan_state;
  uint _region_idx;
  uint _cur_claim;       // First card (in region) of the currently claimed chunk.

public:
  G1CardTableChunkClaimer(G1RemSetScanState* scan_state, uint region_idx) :
    _scan_state(scan_state),
    _region_idx(region_idx),
    _cur_claim(0) {
    guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
  }

  // Claims chunks until one that actually needs scanning is found, or the region
  // is exhausted. Returns whether a claim succeeded.
  bool has_next() {
    while (true) {
      _cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size());
      if (_cur_claim >= HeapRegion::CardsPerRegion) {
        return false;
      }
      if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) {
        return true;
      }
    }
  }

  uint value() const { return _cur_claim; }
  uint size() const { return _scan_state->scan_chunk_size(); }
};

// Scans a heap region for dirty cards.
643 class G1ScanHRForRegionClosure : public HeapRegionClosure { 644 G1CollectedHeap* _g1h; 645 G1CardTable* _ct; 646 G1BlockOffsetTable* _bot; 647 648 G1ParScanThreadState* _pss; 649 650 G1RemSetScanState* _scan_state; 651 652 G1GCPhaseTimes::GCParPhases _phase; 653 654 uint _worker_id; 655 656 size_t _cards_scanned; 657 size_t _blocks_scanned; 658 size_t _chunks_claimed; 659 660 Tickspan _rem_set_root_scan_time; 661 Tickspan _rem_set_trim_partially_time; 662 663 // The address to which this thread already scanned (walked the heap) up to during 664 // card scanning (exclusive). 665 HeapWord* _scanned_to; 666 667 HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) { 668 HeapRegion* const card_region = _g1h->region_at(region_idx_for_card); 669 G1ScanCardClosure card_cl(_g1h, _pss); 670 671 HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl); 672 assert(scanned_to != NULL, "Should be able to scan range"); 673 assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end())); 674 675 _pss->trim_queue_partially(); 676 return scanned_to; 677 } 678 679 void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) { 680 HeapWord* const card_start = _bot->address_for_index_raw(first_card); 681 #ifdef ASSERT 682 HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card); 683 assert(hr == NULL || hr->is_in_reserved(card_start), 684 "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index()); 685 #endif 686 HeapWord* const top = _scan_state->scan_top(region_idx_for_card); 687 if (card_start >= top) { 688 return; 689 } 690 691 HeapWord* scan_end = MIN2(card_start + (num_cards << BOTConstants::LogN_words), top); 692 if (_scanned_to >= scan_end) { 693 return; 694 } 695 MemRegion mr(MAX2(card_start, _scanned_to), scan_end); 696 _scanned_to = 
scan_memregion(region_idx_for_card, mr); 697 698 _cards_scanned += num_cards; 699 } 700 701 ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) { 702 _ct->mark_as_scanned(first_card, num_cards); 703 do_claimed_block(region_idx, first_card, num_cards); 704 _blocks_scanned++; 705 } 706 707 void scan_heap_roots(HeapRegion* r) { 708 EventGCPhaseParallel event; 709 uint const region_idx = r->hrm_index(); 710 711 ResourceMark rm; 712 713 G1CardTableChunkClaimer claim(_scan_state, region_idx); 714 715 // Set the current scan "finger" to NULL for every heap region to scan. Since 716 // the claim value is monotonically increasing, the check to not scan below this 717 // will filter out objects spanning chunks within the region too then, as opposed 718 // to resetting this value for every claim. 719 _scanned_to = NULL; 720 721 while (claim.has_next()) { 722 size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value(); 723 CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx); 724 725 G1CardTableScanner scan(base_addr, claim.size()); 726 727 size_t first_scan_idx = scan.find_next_dirty(); 728 while (first_scan_idx != claim.size()) { 729 assert(*_ct->byte_for_index(region_card_base_idx + first_scan_idx) <= 0x1, "is %d at region %u idx " SIZE_FORMAT, *_ct->byte_for_index(region_card_base_idx + first_scan_idx), region_idx, first_scan_idx); 730 731 size_t const last_scan_idx = scan.find_next_non_dirty(); 732 size_t const len = last_scan_idx - first_scan_idx; 733 734 do_card_block(region_idx, region_card_base_idx + first_scan_idx, len); 735 736 if (last_scan_idx == claim.size()) { 737 break; 738 } 739 740 first_scan_idx = scan.find_next_dirty(); 741 } 742 _chunks_claimed++; 743 } 744 745 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanHR)); 746 } 747 748 public: 749 G1ScanHRForRegionClosure(G1RemSetScanState* 
scan_state, 750 G1ParScanThreadState* pss, 751 uint worker_id, 752 G1GCPhaseTimes::GCParPhases phase) : 753 _g1h(G1CollectedHeap::heap()), 754 _ct(_g1h->card_table()), 755 _bot(_g1h->bot()), 756 _pss(pss), 757 _scan_state(scan_state), 758 _phase(phase), 759 _worker_id(worker_id), 760 _cards_scanned(0), 761 _blocks_scanned(0), 762 _chunks_claimed(0), 763 _rem_set_root_scan_time(), 764 _rem_set_trim_partially_time(), 765 _scanned_to(NULL) { 766 } 767 768 bool do_heap_region(HeapRegion* r) { 769 assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(), 770 "Should only be called on old gen non-collection set regions but region %u is not.", 771 r->hrm_index()); 772 uint const region_idx = r->hrm_index(); 773 774 if (_scan_state->has_cards_to_scan(region_idx)) { 775 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time); 776 scan_heap_roots(r); 777 } 778 return false; 779 } 780 781 Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; } 782 Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; } 783 784 size_t cards_scanned() const { return _cards_scanned; } 785 size_t blocks_scanned() const { return _blocks_scanned; } 786 size_t chunks_claimed() const { return _chunks_claimed; } 787 }; 788 789 void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss, 790 uint worker_id, 791 G1GCPhaseTimes::GCParPhases scan_phase, 792 G1GCPhaseTimes::GCParPhases objcopy_phase) { 793 G1ScanHRForRegionClosure cl(_scan_state, pss, worker_id, scan_phase); 794 _scan_state->iterate_dirty_regions_from(&cl, worker_id); 795 796 G1GCPhaseTimes* p = _g1p->phase_times(); 797 798 p->record_or_add_time_secs(objcopy_phase, worker_id, cl.rem_set_trim_partially_time().seconds()); 799 800 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_root_scan_time().seconds()); 801 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.cards_scanned(), G1GCPhaseTimes::ScanHRScannedCards); 802 
p->record_or_add_thread_work_item(scan_phase, worker_id, cl.blocks_scanned(), G1GCPhaseTimes::ScanHRScannedBlocks); 803 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.chunks_claimed(), G1GCPhaseTimes::ScanHRClaimedChunks); 804 } 805 806 // Heap region closure to be applied to all regions in the current collection set 807 // increment to fix up non-card related roots. 808 class G1ScanCollectionSetRegionClosure : public HeapRegionClosure { 809 G1ParScanThreadState* _pss; 810 G1RemSetScanState* _scan_state; 811 812 G1GCPhaseTimes::GCParPhases _scan_phase; 813 G1GCPhaseTimes::GCParPhases _code_roots_phase; 814 815 uint _worker_id; 816 817 size_t _opt_refs_scanned; 818 size_t _opt_refs_memory_used; 819 820 Tickspan _strong_code_root_scan_time; 821 Tickspan _strong_code_trim_partially_time; 822 823 Tickspan _rem_set_opt_root_scan_time; 824 Tickspan _rem_set_opt_trim_partially_time; 825 826 void scan_opt_rem_set_roots(HeapRegion* r) { 827 EventGCPhaseParallel event; 828 829 G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r); 830 831 G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss); 832 G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl); 833 _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops()); 834 _opt_refs_memory_used += opt_rem_set_list->used_memory(); 835 836 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase)); 837 } 838 839 public: 840 G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state, 841 G1ParScanThreadState* pss, 842 uint worker_i, 843 G1GCPhaseTimes::GCParPhases scan_phase, 844 G1GCPhaseTimes::GCParPhases code_roots_phase) : 845 _pss(pss), 846 _scan_state(scan_state), 847 _scan_phase(scan_phase), 848 _code_roots_phase(code_roots_phase), 849 _worker_id(worker_i), 850 _opt_refs_scanned(0), 851 _opt_refs_memory_used(0), 852 _strong_code_root_scan_time(), 853 _strong_code_trim_partially_time(), 854 _rem_set_opt_root_scan_time(), 
855 _rem_set_opt_trim_partially_time() { } 856 857 bool do_heap_region(HeapRegion* r) { 858 uint const region_idx = r->hrm_index(); 859 860 // The individual references for the optional remembered set are per-worker, so we 861 // always need to scan them. 862 if (r->has_index_in_opt_cset()) { 863 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time); 864 scan_opt_rem_set_roots(r); 865 } 866 867 if (_scan_state->claim_collection_set_region(region_idx)) { 868 EventGCPhaseParallel event; 869 870 G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time); 871 // Scan the strong code root list attached to the current region 872 r->strong_code_roots_do(_pss->closures()->weak_codeblobs()); 873 874 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase)); 875 } 876 877 return false; 878 } 879 880 Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; } 881 Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; } 882 883 Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; } 884 Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; } 885 886 size_t opt_refs_scanned() const { return _opt_refs_scanned; } 887 size_t opt_refs_memory_used() const { return _opt_refs_memory_used; } 888 }; 889 890 void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss, 891 uint worker_id, 892 G1GCPhaseTimes::GCParPhases scan_phase, 893 G1GCPhaseTimes::GCParPhases coderoots_phase, 894 G1GCPhaseTimes::GCParPhases objcopy_phase) { 895 G1ScanCollectionSetRegionClosure cl(_scan_state, pss, worker_id, scan_phase, coderoots_phase); 896 _g1h->collection_set_iterate_increment_from(&cl, worker_id); 897 898 G1GCPhaseTimes* p = _g1h->phase_times(); 899 900 p->record_or_add_time_secs(scan_phase, worker_id, 
cl.rem_set_opt_root_scan_time().seconds()); 901 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds()); 902 903 p->record_or_add_time_secs(coderoots_phase, worker_id, cl.strong_code_root_scan_time().seconds()); 904 p->add_time_secs(objcopy_phase, worker_id, cl.strong_code_root_trim_partially_time().seconds()); 905 906 // At this time we record some metrics only for the evacuations after the initial one. 907 if (scan_phase == G1GCPhaseTimes::OptScanHR) { 908 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanHRScannedOptRefs); 909 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanHRUsedMemory); 910 } 911 } 912 913 void G1RemSet::prepare_for_scan_heap_roots() { 914 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); 915 dcqs.concatenate_logs(); 916 917 _scan_state->prepare(); 918 } 919 920 class G1MergeHeapRootsTask : public AbstractGangTask { 921 922 // Visitor for remembered sets, dropping entries onto the card table. 923 class G1MergeCardSetClosure : public HeapRegionClosure { 924 G1RemSetScanState* _scan_state; 925 G1CardTable* _ct; 926 927 uint _merged_sparse; 928 uint _merged_fine; 929 uint _merged_coarse; 930 931 // Returns if the region contains cards we need to scan. If so, remember that 932 // region in the current set of dirty regions. 
    bool remember_if_interesting(uint const region_idx) {
      if (!_scan_state->contains_cards_to_process(region_idx)) {
        return false;
      }
      _scan_state->add_dirty_region(region_idx);
      return true;
    }
  public:
    G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
      _scan_state(scan_state),
      _ct(G1CollectedHeap::heap()->card_table()),
      _merged_sparse(0),
      _merged_fine(0),
      _merged_coarse(0) { }

    // A coarse PRT entry covers a whole region: dirty every card of that region.
    void next_coarse_prt(uint const region_idx) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_coarse++;

      size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
      _scan_state->set_chunk_region_dirty(region_base_idx);
    }

    // A fine PRT entry is a per-region card bitmap: dirty exactly the set bits.
    void next_fine_prt(uint const region_idx, BitMap* bm) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_fine++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      BitMap::idx_t cur = bm->get_next_one_offset(0);
      while (cur != bm->size()) {
        _ct->mark_clean_as_dirty(region_base_idx + cur);
        _scan_state->set_chunk_dirty(region_base_idx + cur);
        cur = bm->get_next_one_offset(cur + 1);
      }
    }

    // A sparse PRT entry is an explicit list of card indices within the region.
    void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_sparse++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      for (uint i = 0; i < num_cards; i++) {
        size_t card_idx = region_base_idx + cards[i];
        _ct->mark_clean_as_dirty(card_idx);
        _scan_state->set_chunk_dirty(card_idx);
      }
    }

    virtual bool do_heap_region(HeapRegion* r) {
      assert(r->in_collection_set() || r->is_starts_humongous(), "must be");

      HeapRegionRemSet* rem_set = r->rem_set();
      if (!rem_set->is_empty()) {
        rem_set->iterate_prts(*this);
      }

      return false;
    }

    size_t merged_sparse() const { return _merged_sparse; }
    size_t merged_fine() const { return _merged_fine; }
    size_t merged_coarse() const { return _merged_coarse; }
  };

  // Visitor for the remembered sets of humongous candidate regions to merge their
  // remembered set into the card table.
  class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
    G1MergeCardSetClosure _cl;

  public:
    G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();

      // Only humongous start regions that are still eager-reclaim candidates and
      // actually have remembered set entries are processed.
      if (!r->is_starts_humongous() ||
          !g1h->region_attr(r->hrm_index()).is_humongous() ||
          r->rem_set()->is_empty()) {
        return false;
      }

      guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                "Found a not-small remembered set here. This is inconsistent with previous assumptions.");

      _cl.do_heap_region(r);

      // We should only clear the card based remembered set here as we will not
      // implicitly rebuild anything else during eager reclaim. Note that at the moment
      // (and probably never) we do not enter this path if there are other kind of
      // remembered sets for this region.
      r->rem_set()->clear_locked(true /* only_cardset */);
      // Clear_locked() above sets the state to Empty. However we want to continue
      // collecting remembered set entries for humongous regions that were not
      // reclaimed.
      r->rem_set()->set_state_complete();
#ifdef ASSERT
      G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
      assert(region_attr.needs_remset_update(), "must be");
#endif
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");

      return false;
    }

    size_t merged_sparse() const { return _cl.merged_sparse(); }
    size_t merged_fine() const { return _cl.merged_fine(); }
    size_t merged_coarse() const { return _cl.merged_coarse(); }
  };

  // Visitor for the log buffer entries to merge them into the card table.
  class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    size_t _cards_dirty;
    size_t _cards_skipped;
  public:
    G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
      _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
    {}

    void do_card_ptr(CardValue* card_ptr, uint worker_i) {
      // The only time we care about recording cards that
      // contain references that point into the collection set
      // is during RSet updating within an evacuation pause.
      // In this case worker_id should be the id of a GC worker thread.
      assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

      uint const region_idx = _ct->region_idx_for(card_ptr);

      // The second clause must come after - the log buffers might contain cards to uncommited
      // regions.
      // This code may count duplicate entries in the log buffers (even if rare) multiple
      // times.
      if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
        _scan_state->add_dirty_region(region_idx);
        _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
        _cards_dirty++;
      } else {
        // We may have had dirty cards in the (initial) collection set (or the
        // young regions which are always in the initial collection set). We do
        // not fix their cards here: we already added these regions to the set of
        // regions to clear the card table at the end during the prepare() phase.
        _cards_skipped++;
      }
    }

    size_t cards_dirty() const { return _cards_dirty; }
    size_t cards_skipped() const { return _cards_skipped; }
  };

  HeapRegionClaimer _hr_claimer;
  G1RemSetScanState* _scan_state;
  // Dirty card log buffers taken over from the queue set; drained by the workers.
  BufferNode::Stack _dirty_card_buffers;
  bool _initial_evacuation;

  // One-shot flag so that exactly one worker performs the humongous candidate flush.
  volatile bool _fast_reclaim_handled;

  void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    size_t buffer_size = dcqs.buffer_size();
    // Pop buffers until the shared stack is exhausted; each buffer is processed
    // and returned to the queue set's free list.
    while (BufferNode* node = _dirty_card_buffers.pop()) {
      cl->apply_to_buffer(node, buffer_size, worker_id);
      dcqs.deallocate_buffer(node);
    }
  }

public:
  G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
    AbstractGangTask("G1 Merge Heap Roots"),
    _hr_claimer(num_workers),
    _scan_state(scan_state),
    _dirty_card_buffers(),
    _initial_evacuation(initial_evacuation),
    _fast_reclaim_handled(false)
  {
    // Log buffers are only processed during the initial evacuation; take all
    // completed buffers from the queue set up front.
    if (initial_evacuation) {
      G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
      G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
      if (buffers._entry_count != 0) {
        _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
      }
    }
  }

  virtual void work(uint
worker_id) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    G1GCPhaseTimes* p = g1h->phase_times();

    G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
                                                     G1GCPhaseTimes::MergeRS :
                                                     G1GCPhaseTimes::OptMergeRS;

    // We schedule flushing the remembered sets of humongous fast reclaim candidates
    // onto the card table first to allow the remaining parallelized tasks hide it.
    // The cmpxchg ensures only the single worker that observes the flag still
    // 'false' performs the flush.
    if (_initial_evacuation &&
        p->fast_reclaim_humongous_candidates() > 0 &&
        !_fast_reclaim_handled &&
        !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {

      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);

      G1FlushHumongousCandidateRemSets cl(_scan_state);
      g1h->heap_region_iterate(&cl);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
    }

    // Merge remembered sets of current candidates.
    {
      G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
      G1MergeCardSetClosure cl(_scan_state);
      g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
    }

    // Apply closure to log entries in the HCC.
    if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      g1h->iterate_hcc_closure(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
    }

    // Now apply the closure to all remaining log entries.
    if (_initial_evacuation) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);

      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      apply_closure_to_dirty_card_buffers(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards);
    }
  }
};

// Log a summary of how many cards were visited during the merge relative to the
// total number of dirty cards and the total number of old region cards.
void G1RemSet::print_merge_heap_roots_stats() {
  size_t num_visited_cards = _scan_state->num_visited_cards();

  size_t total_dirty_region_cards = _scan_state->num_cards_in_dirty_regions();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t total_old_region_cards =
    (g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion;

  log_debug(gc,remset)("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)",
                       num_visited_cards,
                       total_dirty_region_cards,
                       percent_of(num_visited_cards, total_dirty_region_cards),
                       total_old_region_cards,
                       percent_of(num_visited_cards,
total_old_region_cards)); 1204 } 1205 1206 void G1RemSet::merge_heap_roots(bool initial_evacuation) { 1207 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1208 1209 { 1210 Ticks start = Ticks::now(); 1211 1212 _scan_state->prepare_for_merge_heap_roots(); 1213 1214 Tickspan total = Ticks::now() - start; 1215 if (initial_evacuation) { 1216 g1h->phase_times()->record_prepare_merge_heap_roots_time(total.seconds() * 1000.0); 1217 } else { 1218 g1h->phase_times()->record_or_add_optional_prepare_merge_heap_roots_time(total.seconds() * 1000.0); 1219 } 1220 } 1221 1222 WorkGang* workers = g1h->workers(); 1223 size_t const increment_length = g1h->collection_set()->increment_length(); 1224 1225 uint const num_workers = initial_evacuation ? workers->active_workers() : 1226 MIN2(workers->active_workers(), (uint)increment_length); 1227 1228 { 1229 G1MergeHeapRootsTask cl(_scan_state, num_workers, initial_evacuation); 1230 log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " regions", 1231 cl.name(), num_workers, increment_length); 1232 workers->run_task(&cl, num_workers); 1233 } 1234 1235 if (log_is_enabled(Debug, gc, remset)) { 1236 print_merge_heap_roots_stats(); 1237 } 1238 } 1239 1240 void G1RemSet::prepare_for_scan_heap_roots(uint region_idx) { 1241 _scan_state->clear_scan_top(region_idx); 1242 } 1243 1244 void G1RemSet::cleanup_after_scan_heap_roots() { 1245 G1GCPhaseTimes* phase_times = _g1h->phase_times(); 1246 1247 // Set all cards back to clean. 
  double start = os::elapsedTime();
  _scan_state->cleanup(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

// Debug-only sanity check that the given card points into the committed heap.
inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

// Refine a single card outside of a GC pause: filter out cards that need no
// work (stale, young, already clean), possibly divert the card through the hot
// card cache, then scan the card's dirty range for references.
void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
                                        uint worker_i) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const CardValue* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.
1344 1345 // Non-humongous objects are only allocated in the old-gen during 1346 // GC, so if region is old then top is stable. Humongous object 1347 // allocation sets top last; if top has not yet been set, this is 1348 // a stale card and we'll end up with an empty intersection. If 1349 // this is not a stale card, the synchronization between the 1350 // enqueuing of the card and processing it here will have ensured 1351 // we see the up-to-date top here. 1352 HeapWord* scan_limit = r->top(); 1353 1354 if (scan_limit <= start) { 1355 // If the trimmed region is empty, the card must be stale. 1356 return; 1357 } 1358 1359 // Okay to clean and process the card now. There are still some 1360 // stale card cases that may be detected by iteration and dealt with 1361 // as iteration failure. 1362 *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val(); 1363 1364 // This fence serves two purposes. First, the card must be cleaned 1365 // before processing the contents. Second, we can't proceed with 1366 // processing until after the read of top, for synchronization with 1367 // possibly concurrent humongous object allocation. It's okay that 1368 // reading top and reading type were racy wrto each other. We need 1369 // both set, in any order, to proceed. 1370 OrderAccess::fence(); 1371 1372 // Don't use addr_for(card_ptr + 1) which can ask for 1373 // a card beyond the heap. 1374 HeapWord* end = start + G1CardTable::card_size_in_words; 1375 MemRegion dirty_region(start, MIN2(scan_limit, end)); 1376 assert(!dirty_region.is_empty(), "sanity"); 1377 1378 G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i); 1379 if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) { 1380 _num_conc_refined_cards++; // Unsynchronized update, only used for logging. 1381 return; 1382 } 1383 1384 // If unable to process the card then we encountered an unparsable 1385 // part of the heap (e.g. 
a partially allocated object, so only 1386 // temporarily a problem) while processing a stale card. Despite 1387 // the card being stale, we can't simply ignore it, because we've 1388 // already marked the card cleaned, so taken responsibility for 1389 // ensuring the card gets scanned. 1390 // 1391 // However, the card might have gotten re-dirtied and re-enqueued 1392 // while we worked. (In fact, it's pretty likely.) 1393 if (*card_ptr == G1CardTable::dirty_card_val()) { 1394 return; 1395 } 1396 1397 // Re-dirty the card and enqueue in the *shared* queue. Can't use 1398 // the thread-local queue, because that might be the queue that is 1399 // being processed by us; we could be a Java thread conscripted to 1400 // perform refinement on our queue's current buffer. 1401 *card_ptr = G1CardTable::dirty_card_val(); 1402 G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr); 1403 } 1404 1405 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) { 1406 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) && 1407 (period_count % G1SummarizeRSetStatsPeriod == 0)) { 1408 1409 G1RemSetSummary current(this); 1410 _prev_period_summary.subtract_from(¤t); 1411 1412 Log(gc, remset) log; 1413 log.trace("%s", header); 1414 ResourceMark rm; 1415 LogStream ls(log.trace()); 1416 _prev_period_summary.print_on(&ls); 1417 1418 _prev_period_summary.set(¤t); 1419 } 1420 } 1421 1422 void G1RemSet::print_summary_info() { 1423 Log(gc, remset, exit) log; 1424 if (log.is_trace()) { 1425 log.trace(" Cumulative RS summary"); 1426 G1RemSetSummary current(this); 1427 ResourceMark rm; 1428 LogStream ls(log.trace()); 1429 current.print_on(&ls); 1430 } 1431 } 1432 1433 class G1RebuildRemSetTask: public AbstractGangTask { 1434 // Aggregate the counting data that was constructed concurrently 1435 // with marking. 
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the amount of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) either
    // a) it is marked on the bitmap as such
    // b) its TARS is larger than TAMS, i.e. has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      // Current (candidate) object start; advanced by move_to_next().
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      // Objects at or above TAMS are implicitly live; below TAMS the bitmap decides.
      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      // The bitmap only covers [bottom, TAMS); never search it beyond that or the mr.
      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use
          // the bitmap immediately to find the next live object.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes between bottom and TAMS (marked words
    // converted to bytes; see the HeapWordSize multiplication below).
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
          // two areas will be zero sized. I.e. TAMS is either
          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
          // value: this would mean that TAMS points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        // Only words below TAMS count towards the marked-bytes verification total.
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      // Process the region in fixed-size chunks so we can yield between chunks.
      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        // Yield to a pending safepoint if requested; rebuilding continues afterwards
        // unless marking has been aborted in the meantime.
        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We can not just use region type because there
      // might have already been new allocations into these regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  // Offset added to the gang worker id to form the marking worker id.
  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _hr_claimer(n_workers),
    _cm(cm),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    // Join the suspendible thread set so this worker participates in safepoint
    // yield protocol while rebuilding.
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

// Entry point: rebuild remembered sets concurrently after marking, using the
// given work gang.
void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}