rev 57124 : imported patch 8227739-merge-scan-rs-update-rs-cost
1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1BarrierSet.hpp" 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 28 #include "gc/g1/g1CardTable.inline.hpp" 29 #include "gc/g1/g1CardTableEntryClosure.hpp" 30 #include "gc/g1/g1CollectedHeap.inline.hpp" 31 #include "gc/g1/g1ConcurrentRefine.hpp" 32 #include "gc/g1/g1DirtyCardQueue.hpp" 33 #include "gc/g1/g1FromCardCache.hpp" 34 #include "gc/g1/g1GCPhaseTimes.hpp" 35 #include "gc/g1/g1HotCardCache.hpp" 36 #include "gc/g1/g1OopClosures.inline.hpp" 37 #include "gc/g1/g1RootClosures.hpp" 38 #include "gc/g1/g1RemSet.hpp" 39 #include "gc/g1/g1SharedDirtyCardQueue.hpp" 40 #include "gc/g1/heapRegion.inline.hpp" 41 #include "gc/g1/heapRegionManager.inline.hpp" 42 #include "gc/g1/heapRegionRemSet.inline.hpp" 43 #include "gc/g1/sparsePRT.hpp" 44 #include "gc/shared/gcTraceTime.inline.hpp" 45 #include "gc/shared/ptrQueue.hpp" 46 #include "gc/shared/suspendibleThreadSet.hpp" 47 #include "jfr/jfrEvents.hpp" 48 #include "memory/iterator.hpp" 49 #include "memory/resourceArea.hpp" 50 #include "oops/access.inline.hpp" 51 #include "oops/oop.inline.hpp" 52 #include "runtime/atomic.hpp" 53 #include "runtime/os.hpp" 54 #include "utilities/align.hpp" 55 #include "utilities/globalDefinitions.hpp" 56 #include "utilities/stack.inline.hpp" 57 #include "utilities/ticks.hpp" 58 59 // Collects information about the overall heap root scan progress during an evacuation. 60 // 61 // Scanning the remembered sets works by first merging all sources of cards to be 62 // scanned (log buffers, hcc, remembered sets) into a single data structure to remove 63 // duplicates and simplify work distribution. 64 // 65 // During the following card scanning we not only scan this combined set of cards, but 66 // also remember that these were completely scanned. The following evacuation passes 67 // do not scan these cards again, and so need to be preserved across increments. 68 // 69 // The representation for all the cards to scan is the card table: cards can have 70 // one of three states during GC: 71 // - clean: these cards will not be scanned in this pass 72 // - dirty: these cards will be scanned in this pass 73 // - scanned: these cards have already been scanned in a previous pass 74 // 75 // After all evacuation is done, we reset the card table to clean. 76 // 77 // Work distribution occurs on "chunk" basis, i.e. contiguous ranges of cards. 
As an 78 // additional optimization, during card merging we remember which regions and which 79 // chunks actually contain cards to be scanned. Threads iterate only across these 80 // regions, and only compete for chunks containing any cards. 81 // 82 // Within these chunks, a worker scans the card table on "blocks" of cards, i.e. 83 // contiguous ranges of dirty cards to be scanned. These blocks are converted to actual 84 // memory ranges and then passed on to actual scanning. 85 class G1RemSetScanState : public CHeapObj<mtGC> { 86 class G1DirtyRegions; 87 88 size_t _max_regions; 89 90 // Has this region that is part of the regions in the collection set been processed yet. 91 typedef bool G1RemsetIterState; 92 93 G1RemsetIterState volatile* _collection_set_iter_state; 94 95 // Card table iteration claim for each heap region, from 0 (completely unscanned) 96 // to (>=) HeapRegion::CardsPerRegion (completely scanned). 97 uint volatile* _card_table_scan_state; 98 99 // Return "optimal" number of chunks per region we want to use for claiming areas 100 // within a region to claim. Dependent on the region size as proxy for the heap 101 // size, we limit the total number of chunks to limit memory usage and maintenance 102 // effort of that table vs. granularity of distributing scanning work. 103 // Testing showed that 8 for 1M/2M region, 16 for 4M/8M regions, 32 for 16/32M regions 104 // seems to be such a good trade-off. 105 static uint get_chunks_per_region(uint log_region_size) { 106 // Limit the expected input values to current known possible values of the 107 // (log) region size. Adjust as necessary after testing if changing the permissible 108 // values for region size. 109 assert(log_region_size >= 20 && log_region_size <= 25, 110 "expected value in [20,25], but got %u", log_region_size); 111 return 1u << (log_region_size / 2 - 7); 112 } 113 114 uint _scan_chunks_per_region; // Number of chunks per region. 115 uint8_t _log_scan_chunks_per_region; // Log of number of chunks per region. 116 bool* _region_scan_chunks; 117 size_t _num_total_scan_chunks; // Total number of elements in _region_scan_chunks. 118 uint8_t _scan_chunks_shift; // For conversion between card index and chunk index. 119 public: 120 uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; } 121 122 // Returns whether the chunk corresponding to the given region/card in region contain a 123 // dirty card, i.e. actually needs scanning. 124 bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const { 125 size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift); 126 assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT, 127 idx, _num_total_scan_chunks); 128 return _region_scan_chunks[idx]; 129 } 130 131 private: 132 // The complete set of regions which card table needs to be cleared at the end of GC because 133 // we scribbled all over them. 134 G1DirtyRegions* _all_dirty_regions; 135 // The set of regions which card table needs to be scanned for new dirty cards 136 // in the current evacuation pass. 137 G1DirtyRegions* _next_dirty_regions; 138 139 // Set of (unique) regions that can be added to concurrently. 
140 class G1DirtyRegions : public CHeapObj<mtGC> { 141 uint* _buffer; 142 uint _cur_idx; 143 size_t _max_regions; 144 145 bool* _contains; 146 147 public: 148 G1DirtyRegions(size_t max_regions) : 149 _buffer(NEW_C_HEAP_ARRAY(uint, max_regions, mtGC)), 150 _cur_idx(0), 151 _max_regions(max_regions), 152 _contains(NEW_C_HEAP_ARRAY(bool, max_regions, mtGC)) { 153 154 reset(); 155 } 156 157 static size_t chunk_size() { return M; } 158 159 ~G1DirtyRegions() { 160 FREE_C_HEAP_ARRAY(uint, _buffer); 161 FREE_C_HEAP_ARRAY(bool, _contains); 162 } 163 164 void reset() { 165 _cur_idx = 0; 166 ::memset(_contains, false, _max_regions * sizeof(bool)); 167 } 168 169 uint size() const { return _cur_idx; } 170 171 uint at(uint idx) const { 172 assert(idx < _cur_idx, "Index %u beyond valid regions", idx); 173 return _buffer[idx]; 174 } 175 176 void add_dirty_region(uint region) { 177 if (_contains[region]) { 178 return; 179 } 180 181 bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false; 182 if (marked_as_dirty) { 183 uint allocated = Atomic::add(&_cur_idx, 1u) - 1; 184 _buffer[allocated] = region; 185 } 186 } 187 188 // Creates the union of this and the other G1DirtyRegions. 189 void merge(const G1DirtyRegions* other) { 190 for (uint i = 0; i < other->size(); i++) { 191 uint region = other->at(i); 192 if (!_contains[region]) { 193 _buffer[_cur_idx++] = region; 194 _contains[region] = true; 195 } 196 } 197 } 198 }; 199 200 // Creates a snapshot of the current _top values at the start of collection to 201 // filter out card marks that we do not want to scan. 202 class G1ResetScanTopClosure : public HeapRegionClosure { 203 G1RemSetScanState* _scan_state; 204 205 public: 206 G1ResetScanTopClosure(G1RemSetScanState* scan_state) : _scan_state(scan_state) { } 207 208 virtual bool do_heap_region(HeapRegion* r) { 209 uint hrm_index = r->hrm_index(); 210 if (r->in_collection_set()) { 211 // Young regions had their card table marked as young at their allocation; 212 // we need to make sure that these marks are cleared at the end of GC, *but* 213 // they should not be scanned for cards. 214 // So directly add them to the "all_dirty_regions". 215 // Same for regions in the (initial) collection set: they may contain cards from 216 // the log buffers, make sure they are cleaned. 217 _scan_state->add_all_dirty_region(hrm_index); 218 } else if (r->is_old_or_humongous_or_archive()) { 219 _scan_state->set_scan_top(hrm_index, r->top()); 220 } 221 return false; 222 } 223 }; 224 // For each region, contains the maximum top() value to be used during this garbage 225 // collection. Subsumes common checks like filtering out everything but old and 226 // humongous regions outside the collection set. 227 // This is valid because we are not interested in scanning stray remembered set 228 // entries from free or archive regions. 
229 HeapWord** _scan_top; 230 231 class G1ClearCardTableTask : public AbstractGangTask { 232 G1CollectedHeap* _g1h; 233 G1DirtyRegions* _regions; 234 uint _chunk_length; 235 236 uint volatile _cur_dirty_regions; 237 238 G1RemSetScanState* _scan_state; 239 240 public: 241 G1ClearCardTableTask(G1CollectedHeap* g1h, 242 G1DirtyRegions* regions, 243 uint chunk_length, 244 G1RemSetScanState* scan_state) : 245 AbstractGangTask("G1 Clear Card Table Task"), 246 _g1h(g1h), 247 _regions(regions), 248 _chunk_length(chunk_length), 249 _cur_dirty_regions(0), 250 _scan_state(scan_state) { 251 252 assert(chunk_length > 0, "must be"); 253 } 254 255 static uint chunk_size() { return M; } 256 257 void work(uint worker_id) { 258 while (_cur_dirty_regions < _regions->size()) { 259 uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length; 260 uint max = MIN2(next + _chunk_length, _regions->size()); 261 262 for (uint i = next; i < max; i++) { 263 HeapRegion* r = _g1h->region_at(_regions->at(i)); 264 if (!r->is_survivor()) { 265 r->clear_cardtable(); 266 } 267 } 268 } 269 } 270 }; 271 272 // Clear the card table of "dirty" regions. 273 void clear_card_table(WorkGang* workers) { 274 uint num_regions = _all_dirty_regions->size(); 275 276 if (num_regions == 0) { 277 return; 278 } 279 280 uint const num_chunks = (uint)(align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size()); 281 uint const num_workers = MIN2(num_chunks, workers->active_workers()); 282 uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion; 283 284 // Iterate over the dirty cards region list. 285 G1ClearCardTableTask cl(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this); 286 287 log_debug(gc, ergo)("Running %s using %u workers for %u " 288 "units of work for %u regions.", 289 cl.name(), num_workers, num_chunks, num_regions); 290 workers->run_task(&cl, num_workers); 291 292 #ifndef PRODUCT 293 G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup(); 294 #endif 295 } 296 297 public: 298 G1RemSetScanState() : 299 _max_regions(0), 300 _collection_set_iter_state(NULL), 301 _card_table_scan_state(NULL), 302 _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)), 303 _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)), 304 _region_scan_chunks(NULL), 305 _num_total_scan_chunks(0), 306 _scan_chunks_shift(0), 307 _all_dirty_regions(NULL), 308 _next_dirty_regions(NULL), 309 _scan_top(NULL) { 310 } 311 312 ~G1RemSetScanState() { 313 FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state); 314 FREE_C_HEAP_ARRAY(uint, _card_table_scan_state); 315 FREE_C_HEAP_ARRAY(bool, _region_scan_chunks); 316 FREE_C_HEAP_ARRAY(HeapWord*, _scan_top); 317 } 318 319 void initialize(size_t max_regions) { 320 assert(_collection_set_iter_state == NULL, "Must not be initialized twice"); 321 _max_regions = max_regions; 322 _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC); 323 _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC); 324 _num_total_scan_chunks = max_regions * _scan_chunks_per_region; 325 _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC); 326 327 _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region); 328 _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC); 329 } 330 331 void prepare() { 332 for (size_t i = 0; i < _max_regions; i++) { 333 
_collection_set_iter_state[i] = false; 334 clear_scan_top((uint)i); 335 } 336 337 _all_dirty_regions = new G1DirtyRegions(_max_regions); 338 _next_dirty_regions = new G1DirtyRegions(_max_regions); 339 340 G1ResetScanTopClosure cl(this); 341 G1CollectedHeap::heap()->heap_region_iterate(&cl); 342 } 343 344 void prepare_for_merge_heap_roots() { 345 _all_dirty_regions->merge(_next_dirty_regions); 346 347 _next_dirty_regions->reset(); 348 for (size_t i = 0; i < _max_regions; i++) { 349 _card_table_scan_state[i] = 0; 350 } 351 352 ::memset(_region_scan_chunks, false, _num_total_scan_chunks * sizeof(*_region_scan_chunks)); 353 } 354 355 // Returns whether the given region contains cards we need to scan. The remembered 356 // set and other sources may contain cards that 357 // - are in uncommitted regions 358 // - are located in the collection set 359 // - are located in free regions 360 // as we do not clean up remembered sets before merging heap roots. 361 bool contains_cards_to_process(uint const region_idx) const { 362 HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx); 363 return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive()); 364 } 365 366 size_t num_visited_cards() const { 367 size_t result = 0; 368 for (uint i = 0; i < _num_total_scan_chunks; i++) { 369 if (_region_scan_chunks[i]) { 370 result++; 371 } 372 } 373 return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region); 374 } 375 376 size_t num_cards_in_dirty_regions() const { 377 return _next_dirty_regions->size() * HeapRegion::CardsPerRegion; 378 } 379 380 void set_chunk_region_dirty(size_t const region_card_idx) { 381 size_t chunk_idx = region_card_idx >> _scan_chunks_shift; 382 for (uint i = 0; i < _scan_chunks_per_region; i++) { 383 _region_scan_chunks[chunk_idx++] = true; 384 } 385 } 386 387 void set_chunk_dirty(size_t const card_idx) { 388 assert((card_idx >> _scan_chunks_shift) < _num_total_scan_chunks, 389 "Trying to access index " SIZE_FORMAT " out of bounds " SIZE_FORMAT, 390 card_idx >> _scan_chunks_shift, _num_total_scan_chunks); 391 size_t const chunk_idx = card_idx >> _scan_chunks_shift; 392 if (!_region_scan_chunks[chunk_idx]) { 393 _region_scan_chunks[chunk_idx] = true; 394 } 395 } 396 397 void cleanup(WorkGang* workers) { 398 _all_dirty_regions->merge(_next_dirty_regions); 399 400 clear_card_table(workers); 401 402 delete _all_dirty_regions; 403 _all_dirty_regions = NULL; 404 405 delete _next_dirty_regions; 406 _next_dirty_regions = NULL; 407 } 408 409 void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) { 410 uint num_regions = _next_dirty_regions->size(); 411 412 if (num_regions == 0) { 413 return; 414 } 415 416 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 417 418 WorkGang* workers = g1h->workers(); 419 uint const max_workers = workers->active_workers(); 420 421 uint const start_pos = num_regions * worker_id / max_workers; 422 uint cur = start_pos; 423 424 do { 425 bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur))); 426 guarantee(!result, "Not allowed to ask for early termination."); 427 cur++; 428 if (cur == _next_dirty_regions->size()) { 429 cur = 0; 430 } 431 } while (cur != start_pos); 432 } 433 434 // Attempt to claim the given region in the collection set for iteration. Returns true 435 // if this call caused the transition from Unclaimed to Claimed. 
436 inline bool claim_collection_set_region(uint region) { 437 assert(region < _max_regions, "Tried to access invalid region %u", region); 438 if (_collection_set_iter_state[region]) { 439 return false; 440 } 441 return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true); 442 } 443 444 bool has_cards_to_scan(uint region) { 445 assert(region < _max_regions, "Tried to access invalid region %u", region); 446 return _card_table_scan_state[region] < HeapRegion::CardsPerRegion; 447 } 448 449 uint claim_cards_to_scan(uint region, uint increment) { 450 assert(region < _max_regions, "Tried to access invalid region %u", region); 451 return Atomic::add(&_card_table_scan_state[region], increment) - increment; 452 } 453 454 void add_dirty_region(uint const region) { 455 #ifdef ASSERT 456 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region); 457 assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(), 458 "Region %u is not suitable for scanning, is %sin collection set or %s", 459 hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str()); 460 #endif 461 _next_dirty_regions->add_dirty_region(region); 462 } 463 464 void add_all_dirty_region(uint region) { 465 #ifdef ASSERT 466 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region); 467 assert(hr->in_collection_set(), 468 "Only add young regions to all dirty regions directly but %u is %s", 469 hr->hrm_index(), hr->get_short_type_str()); 470 #endif 471 _all_dirty_regions->add_dirty_region(region); 472 } 473 474 void set_scan_top(uint region_idx, HeapWord* value) { 475 _scan_top[region_idx] = value; 476 } 477 478 HeapWord* scan_top(uint region_idx) const { 479 return _scan_top[region_idx]; 480 } 481 482 void clear_scan_top(uint region_idx) { 483 set_scan_top(region_idx, NULL); 484 } 485 }; 486 487 G1RemSet::G1RemSet(G1CollectedHeap* g1h, 488 G1CardTable* ct, 489 G1HotCardCache* hot_card_cache) : 490 _scan_state(new G1RemSetScanState()), 491 _prev_period_summary(false), 492 _g1h(g1h), 493 _ct(ct), 494 _g1p(_g1h->policy()), 495 _hot_card_cache(hot_card_cache) { 496 } 497 498 G1RemSet::~G1RemSet() { 499 delete _scan_state; 500 } 501 502 uint G1RemSet::num_par_rem_sets() { 503 return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads); 504 } 505 506 void G1RemSet::initialize(size_t capacity, uint max_regions) { 507 G1FromCardCache::initialize(num_par_rem_sets(), max_regions); 508 _scan_state->initialize(max_regions); 509 } 510 511 // Helper class to scan and detect ranges of cards that need to be scanned on the 512 // card table. 
513 class G1CardTableScanner : public StackObj { 514 public: 515 typedef CardTable::CardValue CardValue; 516 517 private: 518 CardValue* const _base_addr; 519 520 CardValue* _cur_addr; 521 CardValue* const _end_addr; 522 523 static const size_t ToScanMask = G1CardTable::g1_card_already_scanned; 524 static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned; 525 526 bool cur_addr_aligned() const { 527 return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0; 528 } 529 530 bool cur_card_is_dirty() const { 531 CardValue value = *_cur_addr; 532 return (value & ToScanMask) == 0; 533 } 534 535 bool cur_word_of_cards_contains_any_dirty_card() const { 536 assert(cur_addr_aligned(), "Current address should be aligned"); 537 size_t const value = *(size_t*)_cur_addr; 538 return (~value & ExpandedToScanMask) != 0; 539 } 540 541 bool cur_word_of_cards_all_dirty_cards() const { 542 size_t const value = *(size_t*)_cur_addr; 543 return value == G1CardTable::WordAllDirty; 544 } 545 546 size_t get_and_advance_pos() { 547 _cur_addr++; 548 return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1; 549 } 550 551 public: 552 G1CardTableScanner(CardValue* start_card, size_t size) : 553 _base_addr(start_card), 554 _cur_addr(start_card), 555 _end_addr(start_card + size) { 556 557 assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card)); 558 assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size); 559 } 560 561 size_t find_next_dirty() { 562 while (!cur_addr_aligned()) { 563 if (cur_card_is_dirty()) { 564 return get_and_advance_pos(); 565 } 566 _cur_addr++; 567 } 568 569 assert(cur_addr_aligned(), "Current address should be aligned now."); 570 while (_cur_addr != _end_addr) { 571 if (cur_word_of_cards_contains_any_dirty_card()) { 572 for (size_t i = 0; i < sizeof(size_t); i++) { 573 if (cur_card_is_dirty()) { 574 return get_and_advance_pos(); 575 } 576 _cur_addr++; 577 } 578 assert(false, "Should not reach here given we detected a dirty card in the word."); 579 } 580 _cur_addr += sizeof(size_t); 581 } 582 return get_and_advance_pos(); 583 } 584 585 size_t find_next_non_dirty() { 586 assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area."); 587 588 while (!cur_addr_aligned()) { 589 if (!cur_card_is_dirty()) { 590 return get_and_advance_pos(); 591 } 592 _cur_addr++; 593 } 594 595 assert(cur_addr_aligned(), "Current address should be aligned now."); 596 while (_cur_addr != _end_addr) { 597 if (!cur_word_of_cards_all_dirty_cards()) { 598 for (size_t i = 0; i < sizeof(size_t); i++) { 599 if (!cur_card_is_dirty()) { 600 return get_and_advance_pos(); 601 } 602 _cur_addr++; 603 } 604 assert(false, "Should not reach here given we detected a non-dirty card in the word."); 605 } 606 _cur_addr += sizeof(size_t); 607 } 608 return get_and_advance_pos(); 609 } 610 }; 611 612 // Helper class to claim dirty chunks within the card table. 
613 class G1CardTableChunkClaimer { 614 G1RemSetScanState* _scan_state; 615 uint _region_idx; 616 uint _cur_claim; 617 618 public: 619 G1CardTableChunkClaimer(G1RemSetScanState* scan_state, uint region_idx) : 620 _scan_state(scan_state), 621 _region_idx(region_idx), 622 _cur_claim(0) { 623 guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible."); 624 } 625 626 bool has_next() { 627 while (true) { 628 _cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size()); 629 if (_cur_claim >= HeapRegion::CardsPerRegion) { 630 return false; 631 } 632 if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) { 633 return true; 634 } 635 } 636 } 637 638 uint value() const { return _cur_claim; } 639 uint size() const { return _scan_state->scan_chunk_size(); } 640 }; 641 642 // Scans a heap region for dirty cards. 643 class G1ScanHRForRegionClosure : public HeapRegionClosure { 644 G1CollectedHeap* _g1h; 645 G1CardTable* _ct; 646 G1BlockOffsetTable* _bot; 647 648 G1ParScanThreadState* _pss; 649 650 G1RemSetScanState* _scan_state; 651 652 G1GCPhaseTimes::GCParPhases _phase; 653 654 uint _worker_id; 655 656 size_t _cards_scanned; 657 size_t _blocks_scanned; 658 size_t _chunks_claimed; 659 660 Tickspan _rem_set_root_scan_time; 661 Tickspan _rem_set_trim_partially_time; 662 663 // The address to which this thread already scanned (walked the heap) up to during 664 // card scanning (exclusive). 665 HeapWord* _scanned_to; 666 667 HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) { 668 HeapRegion* const card_region = _g1h->region_at(region_idx_for_card); 669 G1ScanCardClosure card_cl(_g1h, _pss); 670 671 HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl); 672 assert(scanned_to != NULL, "Should be able to scan range"); 673 assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end())); 674 675 _pss->trim_queue_partially(); 676 return scanned_to; 677 } 678 679 void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) { 680 HeapWord* const card_start = _bot->address_for_index_raw(first_card); 681 #ifdef ASSERT 682 HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card); 683 assert(hr == NULL || hr->is_in_reserved(card_start), 684 "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index()); 685 #endif 686 HeapWord* const top = _scan_state->scan_top(region_idx_for_card); 687 if (card_start >= top) { 688 return; 689 } 690 691 HeapWord* scan_end = MIN2(card_start + (num_cards << BOTConstants::LogN_words), top); 692 if (_scanned_to >= scan_end) { 693 return; 694 } 695 MemRegion mr(MAX2(card_start, _scanned_to), scan_end); 696 _scanned_to = scan_memregion(region_idx_for_card, mr); 697 698 _cards_scanned += num_cards; 699 } 700 701 ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) { 702 _ct->mark_as_scanned(first_card, num_cards); 703 do_claimed_block(region_idx, first_card, num_cards); 704 _blocks_scanned++; 705 } 706 707 void scan_heap_roots(HeapRegion* r) { 708 EventGCPhaseParallel event; 709 uint const region_idx = r->hrm_index(); 710 711 ResourceMark rm; 712 713 G1CardTableChunkClaimer claim(_scan_state, region_idx); 714 715 // Set the current scan "finger" to NULL for every heap region to scan. 
Since 716 // the claim value is monotonically increasing, the check to not scan below this 717 // will filter out objects spanning chunks within the region too then, as opposed 718 // to resetting this value for every claim. 719 _scanned_to = NULL; 720 721 while (claim.has_next()) { 722 size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value(); 723 CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx); 724 725 G1CardTableScanner scan(base_addr, claim.size()); 726 727 size_t first_scan_idx = scan.find_next_dirty(); 728 while (first_scan_idx != claim.size()) { 729 assert(*_ct->byte_for_index(region_card_base_idx + first_scan_idx) <= 0x1, "is %d at region %u idx " SIZE_FORMAT, *_ct->byte_for_index(region_card_base_idx + first_scan_idx), region_idx, first_scan_idx); 730 731 size_t const last_scan_idx = scan.find_next_non_dirty(); 732 size_t const len = last_scan_idx - first_scan_idx; 733 734 do_card_block(region_idx, region_card_base_idx + first_scan_idx, len); 735 736 if (last_scan_idx == claim.size()) { 737 break; 738 } 739 740 first_scan_idx = scan.find_next_dirty(); 741 } 742 _chunks_claimed++; 743 } 744 745 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanHR)); 746 } 747 748 public: 749 G1ScanHRForRegionClosure(G1RemSetScanState* scan_state, 750 G1ParScanThreadState* pss, 751 uint worker_id, 752 G1GCPhaseTimes::GCParPhases phase) : 753 _g1h(G1CollectedHeap::heap()), 754 _ct(_g1h->card_table()), 755 _bot(_g1h->bot()), 756 _pss(pss), 757 _scan_state(scan_state), 758 _phase(phase), 759 _worker_id(worker_id), 760 _cards_scanned(0), 761 _blocks_scanned(0), 762 _chunks_claimed(0), 763 _rem_set_root_scan_time(), 764 _rem_set_trim_partially_time(), 765 _scanned_to(NULL) { 766 } 767 768 bool do_heap_region(HeapRegion* r) { 769 assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(), 770 "Should only be called on old gen non-collection set regions but region %u is not.", 771 r->hrm_index()); 772 uint const region_idx = r->hrm_index(); 773 774 if (_scan_state->has_cards_to_scan(region_idx)) { 775 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time); 776 scan_heap_roots(r); 777 } 778 return false; 779 } 780 781 Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; } 782 Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; } 783 784 size_t cards_scanned() const { return _cards_scanned; } 785 size_t blocks_scanned() const { return _blocks_scanned; } 786 size_t chunks_claimed() const { return _chunks_claimed; } 787 }; 788 789 void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss, 790 uint worker_id, 791 G1GCPhaseTimes::GCParPhases scan_phase, 792 G1GCPhaseTimes::GCParPhases objcopy_phase) { 793 G1ScanHRForRegionClosure cl(_scan_state, pss, worker_id, scan_phase); 794 _scan_state->iterate_dirty_regions_from(&cl, worker_id); 795 796 G1GCPhaseTimes* p = _g1p->phase_times(); 797 798 p->record_or_add_time_secs(objcopy_phase, worker_id, cl.rem_set_trim_partially_time().seconds()); 799 800 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_root_scan_time().seconds()); 801 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.cards_scanned(), G1GCPhaseTimes::ScanHRScannedCards); 802 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.blocks_scanned(), G1GCPhaseTimes::ScanHRScannedBlocks); 803 p->record_or_add_thread_work_item(scan_phase, worker_id, 
cl.chunks_claimed(), G1GCPhaseTimes::ScanHRClaimedChunks); 804 } 805 806 // Heap region closure to be applied to all regions in the current collection set 807 // increment to fix up non-card related roots. 808 class G1ScanCollectionSetRegionClosure : public HeapRegionClosure { 809 G1ParScanThreadState* _pss; 810 G1RemSetScanState* _scan_state; 811 812 G1GCPhaseTimes::GCParPhases _scan_phase; 813 G1GCPhaseTimes::GCParPhases _code_roots_phase; 814 815 uint _worker_id; 816 817 size_t _opt_refs_scanned; 818 size_t _opt_refs_memory_used; 819 820 Tickspan _strong_code_root_scan_time; 821 Tickspan _strong_code_trim_partially_time; 822 823 Tickspan _rem_set_opt_root_scan_time; 824 Tickspan _rem_set_opt_trim_partially_time; 825 826 void scan_opt_rem_set_roots(HeapRegion* r) { 827 EventGCPhaseParallel event; 828 829 G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r); 830 831 G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss); 832 G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl); 833 _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops()); 834 _opt_refs_memory_used += opt_rem_set_list->used_memory(); 835 836 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase)); 837 } 838 839 public: 840 G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state, 841 G1ParScanThreadState* pss, 842 uint worker_id, 843 G1GCPhaseTimes::GCParPhases scan_phase, 844 G1GCPhaseTimes::GCParPhases code_roots_phase) : 845 _pss(pss), 846 _scan_state(scan_state), 847 _scan_phase(scan_phase), 848 _code_roots_phase(code_roots_phase), 849 _worker_id(worker_id), 850 _opt_refs_scanned(0), 851 _opt_refs_memory_used(0), 852 _strong_code_root_scan_time(), 853 _strong_code_trim_partially_time(), 854 _rem_set_opt_root_scan_time(), 855 _rem_set_opt_trim_partially_time() { } 856 857 bool do_heap_region(HeapRegion* r) { 858 uint const region_idx = r->hrm_index(); 859 860 // The individual references for the optional remembered set are per-worker, so we 861 // always need to scan them. 
862 if (r->has_index_in_opt_cset()) { 863 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time); 864 scan_opt_rem_set_roots(r); 865 } 866 867 if (_scan_state->claim_collection_set_region(region_idx)) { 868 EventGCPhaseParallel event; 869 870 G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time); 871 // Scan the strong code root list attached to the current region 872 r->strong_code_roots_do(_pss->closures()->weak_codeblobs()); 873 874 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase)); 875 } 876 877 return false; 878 } 879 880 Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; } 881 Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; } 882 883 Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; } 884 Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; } 885 886 size_t opt_refs_scanned() const { return _opt_refs_scanned; } 887 size_t opt_refs_memory_used() const { return _opt_refs_memory_used; } 888 }; 889 890 void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss, 891 uint worker_id, 892 G1GCPhaseTimes::GCParPhases scan_phase, 893 G1GCPhaseTimes::GCParPhases coderoots_phase, 894 G1GCPhaseTimes::GCParPhases objcopy_phase) { 895 G1ScanCollectionSetRegionClosure cl(_scan_state, pss, worker_id, scan_phase, coderoots_phase); 896 _g1h->collection_set_iterate_increment_from(&cl, worker_id); 897 898 G1GCPhaseTimes* p = _g1h->phase_times(); 899 900 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_root_scan_time().seconds()); 901 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds()); 902 903 p->record_or_add_time_secs(coderoots_phase, worker_id, cl.strong_code_root_scan_time().seconds()); 904 p->add_time_secs(objcopy_phase, worker_id, cl.strong_code_root_trim_partially_time().seconds()); 905 906 // At this time we record some metrics only for the evacuations after the initial one. 907 if (scan_phase == G1GCPhaseTimes::OptScanHR) { 908 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanHRScannedOptRefs); 909 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanHRUsedMemory); 910 } 911 } 912 913 void G1RemSet::prepare_for_scan_heap_roots() { 914 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); 915 dcqs.concatenate_logs(); 916 917 _scan_state->prepare(); 918 } 919 920 class G1MergeHeapRootsTask : public AbstractGangTask { 921 922 // Visitor for remembered sets, dropping entries onto the card table. 923 class G1MergeCardSetClosure : public HeapRegionClosure { 924 G1RemSetScanState* _scan_state; 925 G1CardTable* _ct; 926 927 uint _merged_sparse; 928 uint _merged_fine; 929 uint _merged_coarse; 930 931 // Returns if the region contains cards we need to scan. If so, remember that 932 // region in the current set of dirty regions. 
933 bool remember_if_interesting(uint const region_idx) { 934 if (!_scan_state->contains_cards_to_process(region_idx)) { 935 return false; 936 } 937 _scan_state->add_dirty_region(region_idx); 938 return true; 939 } 940 public: 941 G1MergeCardSetClosure(G1RemSetScanState* scan_state) : 942 _scan_state(scan_state), 943 _ct(G1CollectedHeap::heap()->card_table()), 944 _merged_sparse(0), 945 _merged_fine(0), 946 _merged_coarse(0) { } 947 948 void next_coarse_prt(uint const region_idx) { 949 if (!remember_if_interesting(region_idx)) { 950 return; 951 } 952 953 _merged_coarse++; 954 955 size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion; 956 _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion); 957 _scan_state->set_chunk_region_dirty(region_base_idx); 958 } 959 960 void next_fine_prt(uint const region_idx, BitMap* bm) { 961 if (!remember_if_interesting(region_idx)) { 962 return; 963 } 964 965 _merged_fine++; 966 967 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion; 968 BitMap::idx_t cur = bm->get_next_one_offset(0); 969 while (cur != bm->size()) { 970 _ct->mark_clean_as_dirty(region_base_idx + cur); 971 _scan_state->set_chunk_dirty(region_base_idx + cur); 972 cur = bm->get_next_one_offset(cur + 1); 973 } 974 } 975 976 void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) { 977 if (!remember_if_interesting(region_idx)) { 978 return; 979 } 980 981 _merged_sparse++; 982 983 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion; 984 for (uint i = 0; i < num_cards; i++) { 985 size_t card_idx = region_base_idx + cards[i]; 986 _ct->mark_clean_as_dirty(card_idx); 987 _scan_state->set_chunk_dirty(card_idx); 988 } 989 } 990 991 virtual bool do_heap_region(HeapRegion* r) { 992 assert(r->in_collection_set() || r->is_starts_humongous(), "must be"); 993 994 HeapRegionRemSet* rem_set = r->rem_set(); 995 if (!rem_set->is_empty()) { 996 rem_set->iterate_prts(*this); 997 } 998 999 return false; 1000 } 1001 1002 size_t merged_sparse() const { return _merged_sparse; } 1003 size_t merged_fine() const { return _merged_fine; } 1004 size_t merged_coarse() const { return _merged_coarse; } 1005 }; 1006 1007 // Visitor for the remembered sets of humongous candidate regions to merge their 1008 // remembered set into the card table. 1009 class G1FlushHumongousCandidateRemSets : public HeapRegionClosure { 1010 G1MergeCardSetClosure _cl; 1011 1012 public: 1013 G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { } 1014 1015 virtual bool do_heap_region(HeapRegion* r) { 1016 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1017 1018 if (!r->is_starts_humongous() || 1019 !g1h->region_attr(r->hrm_index()).is_humongous() || 1020 r->rem_set()->is_empty()) { 1021 return false; 1022 } 1023 1024 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries), 1025 "Found a not-small remembered set here. This is inconsistent with previous assumptions."); 1026 1027 _cl.do_heap_region(r); 1028 1029 // We should only clear the card based remembered set here as we will not 1030 // implicitly rebuild anything else during eager reclaim. Note that at the moment 1031 // (and probably never) we do not enter this path if there are other kind of 1032 // remembered sets for this region. 1033 r->rem_set()->clear_locked(true /* only_cardset */); 1034 // Clear_locked() above sets the state to Empty. 
However we want to continue 1035 // collecting remembered set entries for humongous regions that were not 1036 // reclaimed. 1037 r->rem_set()->set_state_complete(); 1038 #ifdef ASSERT 1039 G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index()); 1040 assert(region_attr.needs_remset_update(), "must be"); 1041 #endif 1042 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty."); 1043 1044 return false; 1045 } 1046 1047 size_t merged_sparse() const { return _cl.merged_sparse(); } 1048 size_t merged_fine() const { return _cl.merged_fine(); } 1049 size_t merged_coarse() const { return _cl.merged_coarse(); } 1050 }; 1051 1052 // Visitor for the log buffer entries to merge them into the card table. 1053 class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure { 1054 G1RemSetScanState* _scan_state; 1055 G1CardTable* _ct; 1056 1057 size_t _cards_dirty; 1058 size_t _cards_skipped; 1059 public: 1060 G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) : 1061 _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0) 1062 {} 1063 1064 void do_card_ptr(CardValue* card_ptr, uint worker_id) { 1065 // The only time we care about recording cards that 1066 // contain references that point into the collection set 1067 // is during RSet updating within an evacuation pause. 1068 // In this case worker_id should be the id of a GC worker thread. 1069 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); 1070 1071 uint const region_idx = _ct->region_idx_for(card_ptr); 1072 1073 // The second clause must come after - the log buffers might contain cards to uncommited 1074 // regions. 1075 // This code may count duplicate entries in the log buffers (even if rare) multiple 1076 // times. 1077 if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) { 1078 _scan_state->add_dirty_region(region_idx); 1079 _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr)); 1080 _cards_dirty++; 1081 } else { 1082 // We may have had dirty cards in the (initial) collection set (or the 1083 // young regions which are always in the initial collection set). We do 1084 // not fix their cards here: we already added these regions to the set of 1085 // regions to clear the card table at the end during the prepare() phase. 
1086 _cards_skipped++; 1087 } 1088 } 1089 1090 size_t cards_dirty() const { return _cards_dirty; } 1091 size_t cards_skipped() const { return _cards_skipped; } 1092 }; 1093 1094 HeapRegionClaimer _hr_claimer; 1095 G1RemSetScanState* _scan_state; 1096 BufferNode::Stack _dirty_card_buffers; 1097 bool _initial_evacuation; 1098 1099 volatile bool _fast_reclaim_handled; 1100 1101 void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) { 1102 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); 1103 size_t buffer_size = dcqs.buffer_size(); 1104 while (BufferNode* node = _dirty_card_buffers.pop()) { 1105 cl->apply_to_buffer(node, buffer_size, worker_id); 1106 dcqs.deallocate_buffer(node); 1107 } 1108 } 1109 1110 public: 1111 G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) : 1112 AbstractGangTask("G1 Merge Heap Roots"), 1113 _hr_claimer(num_workers), 1114 _scan_state(scan_state), 1115 _dirty_card_buffers(), 1116 _initial_evacuation(initial_evacuation), 1117 _fast_reclaim_handled(false) 1118 { 1119 if (initial_evacuation) { 1120 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); 1121 G1BufferNodeList buffers = dcqs.take_all_completed_buffers(); 1122 if (buffers._entry_count != 0) { 1123 _dirty_card_buffers.prepend(*buffers._head, *buffers._tail); 1124 } 1125 } 1126 } 1127 1128 virtual void work(uint worker_id) { 1129 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1130 G1GCPhaseTimes* p = g1h->phase_times(); 1131 1132 G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ? 1133 G1GCPhaseTimes::MergeRS : 1134 G1GCPhaseTimes::OptMergeRS; 1135 1136 // We schedule flushing the remembered sets of humongous fast reclaim candidates 1137 // onto the card table first to allow the remaining parallelized tasks hide it. 1138 if (_initial_evacuation && 1139 p->fast_reclaim_humongous_candidates() > 0 && 1140 !_fast_reclaim_handled && 1141 !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) { 1142 1143 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id); 1144 1145 G1FlushHumongousCandidateRemSets cl(_scan_state); 1146 g1h->heap_region_iterate(&cl); 1147 1148 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse); 1149 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine); 1150 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse); 1151 } 1152 1153 // Merge remembered sets of current candidates. 1154 { 1155 G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */); 1156 G1MergeCardSetClosure cl(_scan_state); 1157 g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id); 1158 1159 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse); 1160 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine); 1161 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse); 1162 } 1163 1164 // Apply closure to log entries in the HCC. 
1165 if (_initial_evacuation && G1HotCardCache::default_use_cache()) { 1166 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase"); 1167 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id); 1168 G1MergeLogBufferCardsClosure cl(g1h, _scan_state); 1169 g1h->iterate_hcc_closure(&cl, worker_id); 1170 1171 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards); 1172 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards); 1173 } 1174 1175 // Now apply the closure to all remaining log entries. 1176 if (_initial_evacuation) { 1177 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase"); 1178 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id); 1179 1180 G1MergeLogBufferCardsClosure cl(g1h, _scan_state); 1181 apply_closure_to_dirty_card_buffers(&cl, worker_id); 1182 1183 p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards); 1184 p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards); 1185 } 1186 } 1187 }; 1188 1189 void G1RemSet::print_merge_heap_roots_stats() { 1190 size_t num_visited_cards = _scan_state->num_visited_cards(); 1191 1192 size_t total_dirty_region_cards = _scan_state->num_cards_in_dirty_regions(); 1193 1194 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1195 size_t total_old_region_cards = 1196 (g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion; 1197 1198 log_debug(gc,remset)("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)", 1199 num_visited_cards, 1200 total_dirty_region_cards, 1201 percent_of(num_visited_cards, total_dirty_region_cards), 1202 total_old_region_cards, 1203 percent_of(num_visited_cards, total_old_region_cards)); 1204 } 1205 1206 void G1RemSet::merge_heap_roots(bool initial_evacuation) { 1207 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1208 1209 { 1210 Ticks start = Ticks::now(); 1211 1212 _scan_state->prepare_for_merge_heap_roots(); 1213 1214 Tickspan total = Ticks::now() - start; 1215 if (initial_evacuation) { 1216 g1h->phase_times()->record_prepare_merge_heap_roots_time(total.seconds() * 1000.0); 1217 } else { 1218 g1h->phase_times()->record_or_add_optional_prepare_merge_heap_roots_time(total.seconds() * 1000.0); 1219 } 1220 } 1221 1222 WorkGang* workers = g1h->workers(); 1223 size_t const increment_length = g1h->collection_set()->increment_length(); 1224 1225 uint const num_workers = initial_evacuation ? workers->active_workers() : 1226 MIN2(workers->active_workers(), (uint)increment_length); 1227 1228 { 1229 G1MergeHeapRootsTask cl(_scan_state, num_workers, initial_evacuation); 1230 log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " regions", 1231 cl.name(), num_workers, increment_length); 1232 workers->run_task(&cl, num_workers); 1233 } 1234 1235 if (log_is_enabled(Debug, gc, remset)) { 1236 print_merge_heap_roots_stats(); 1237 } 1238 } 1239 1240 void G1RemSet::prepare_for_scan_heap_roots(uint region_idx) { 1241 _scan_state->clear_scan_top(region_idx); 1242 } 1243 1244 void G1RemSet::cleanup_after_scan_heap_roots() { 1245 G1GCPhaseTimes* phase_times = _g1h->phase_times(); 1246 1247 // Set all cards back to clean. 
1248 double start = os::elapsedTime(); 1249 _scan_state->cleanup(_g1h->workers()); 1250 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0); 1251 } 1252 1253 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) { 1254 #ifdef ASSERT 1255 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1256 assert(g1h->is_in_exact(ct->addr_for(card_ptr)), 1257 "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap", 1258 p2i(card_ptr), 1259 ct->index_for(ct->addr_for(card_ptr)), 1260 p2i(ct->addr_for(card_ptr)), 1261 g1h->addr_to_region(ct->addr_for(card_ptr))); 1262 #endif 1263 } 1264 1265 bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) { 1266 assert(!_g1h->is_gc_active(), "Only call concurrently"); 1267 1268 CardValue* card_ptr = *card_ptr_addr; 1269 // Find the start address represented by the card. 1270 HeapWord* start = _ct->addr_for(card_ptr); 1271 // And find the region containing it. 1272 HeapRegion* r = _g1h->heap_region_containing_or_null(start); 1273 1274 // If this is a (stale) card into an uncommitted region, exit. 1275 if (r == NULL) { 1276 return false; 1277 } 1278 1279 check_card_ptr(card_ptr, _ct); 1280 1281 // If the card is no longer dirty, nothing to do. 1282 // We cannot load the card value before the "r == NULL" check, because G1 1283 // could uncommit parts of the card table covering uncommitted regions. 1284 if (*card_ptr != G1CardTable::dirty_card_val()) { 1285 return false; 1286 } 1287 1288 // This check is needed for some uncommon cases where we should 1289 // ignore the card. 1290 // 1291 // The region could be young. Cards for young regions are 1292 // distinctly marked (set to g1_young_gen), so the post-barrier will 1293 // filter them out. However, that marking is performed 1294 // concurrently. A write to a young object could occur before the 1295 // card has been marked young, slipping past the filter. 1296 // 1297 // The card could be stale, because the region has been freed since 1298 // the card was recorded. In this case the region type could be 1299 // anything. If (still) free or (reallocated) young, just ignore 1300 // it. If (reallocated) old or humongous, the later card trimming 1301 // and additional checks in iteration may detect staleness. At 1302 // worst, we end up processing a stale card unnecessarily. 1303 // 1304 // In the normal (non-stale) case, the synchronization between the 1305 // enqueueing of the card and processing it here will have ensured 1306 // we see the up-to-date region type here. 1307 if (!r->is_old_or_humongous_or_archive()) { 1308 return false; 1309 } 1310 1311 // The result from the hot card cache insert call is either: 1312 // * pointer to the current card 1313 // (implying that the current card is not 'hot'), 1314 // * null 1315 // (meaning we had inserted the card ptr into the "hot" card cache, 1316 // which had some headroom), 1317 // * a pointer to a "hot" card that was evicted from the "hot" cache. 1318 // 1319 1320 if (_hot_card_cache->use_cache()) { 1321 assert(!SafepointSynchronize::is_at_safepoint(), "sanity"); 1322 1323 const CardValue* orig_card_ptr = card_ptr; 1324 card_ptr = _hot_card_cache->insert(card_ptr); 1325 if (card_ptr == NULL) { 1326 // There was no eviction. Nothing to do. 1327 return false; 1328 } else if (card_ptr != orig_card_ptr) { 1329 // Original card was inserted and an old card was evicted. 
1330 start = _ct->addr_for(card_ptr); 1331 r = _g1h->heap_region_containing(start); 1332 1333 // Check whether the region formerly in the cache should be 1334 // ignored, as discussed earlier for the original card. The 1335 // region could have been freed while in the cache. 1336 if (!r->is_old_or_humongous_or_archive()) { 1337 return false; 1338 } 1339 *card_ptr_addr = card_ptr; 1340 } // Else we still have the original card. 1341 } 1342 1343 // Trim the region designated by the card to what's been allocated 1344 // in the region. The card could be stale, or the card could cover 1345 // (part of) an object at the end of the allocated space and extend 1346 // beyond the end of allocation. 1347 1348 // Non-humongous objects are either allocated in the old regions during GC, 1349 // or mapped in archive regions during startup. So if region is old or 1350 // archive then top is stable. 1351 // Humongous object allocation sets top last; if top has not yet been set, 1352 // this is a stale card and we'll end up with an empty intersection. 1353 // If this is not a stale card, the synchronization between the 1354 // enqueuing of the card and processing it here will have ensured 1355 // we see the up-to-date top here. 1356 HeapWord* scan_limit = r->top(); 1357 1358 if (scan_limit <= start) { 1359 // If the trimmed region is empty, the card must be stale. 1360 return false; 1361 } 1362 1363 // Okay to clean and process the card now. There are still some 1364 // stale card cases that may be detected by iteration and dealt with 1365 // as iteration failure. 1366 *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val(); 1367 1368 return true; 1369 } 1370 1371 void G1RemSet::refine_card_concurrently(CardValue* const card_ptr, 1372 const uint worker_id) { 1373 assert(!_g1h->is_gc_active(), "Only call concurrently"); 1374 check_card_ptr(card_ptr, _ct); 1375 1376 // Construct the MemRegion representing the card. 1377 HeapWord* start = _ct->addr_for(card_ptr); 1378 // And find the region containing it. 1379 HeapRegion* r = _g1h->heap_region_containing(start); 1380 // This reload of the top is safe even though it happens after the full 1381 // fence, because top is stable for old, archive and unfiltered humongous 1382 // regions, so it must return the same value as the previous load when 1383 // cleaning the card. Also cleaning the card and refinement of the card 1384 // cannot span across safepoint, so we don't need to worry about top being 1385 // changed during safepoint. 1386 HeapWord* scan_limit = r->top(); 1387 assert(scan_limit > start, "sanity"); 1388 1389 // Don't use addr_for(card_ptr + 1) which can ask for 1390 // a card beyond the heap. 1391 HeapWord* end = start + G1CardTable::card_size_in_words; 1392 MemRegion dirty_region(start, MIN2(scan_limit, end)); 1393 assert(!dirty_region.is_empty(), "sanity"); 1394 1395 G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id); 1396 if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) { 1397 return; 1398 } 1399 1400 // If unable to process the card then we encountered an unparsable 1401 // part of the heap (e.g. a partially allocated object, so only 1402 // temporarily a problem) while processing a stale card. Despite 1403 // the card being stale, we can't simply ignore it, because we've 1404 // already marked the card cleaned, so taken responsibility for 1405 // ensuring the card gets scanned. 
1406 // 1407 // However, the card might have gotten re-dirtied and re-enqueued 1408 // while we worked. (In fact, it's pretty likely.) 1409 if (*card_ptr == G1CardTable::dirty_card_val()) { 1410 return; 1411 } 1412 1413 // Re-dirty the card and enqueue in the *shared* queue. Can't use 1414 // the thread-local queue, because that might be the queue that is 1415 // being processed by us; we could be a Java thread conscripted to 1416 // perform refinement on our queue's current buffer. 1417 *card_ptr = G1CardTable::dirty_card_val(); 1418 G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr); 1419 } 1420 1421 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) { 1422 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) && 1423 (period_count % G1SummarizeRSetStatsPeriod == 0)) { 1424 1425 G1RemSetSummary current; 1426 _prev_period_summary.subtract_from(¤t); 1427 1428 Log(gc, remset) log; 1429 log.trace("%s", header); 1430 ResourceMark rm; 1431 LogStream ls(log.trace()); 1432 _prev_period_summary.print_on(&ls); 1433 1434 _prev_period_summary.set(¤t); 1435 } 1436 } 1437 1438 void G1RemSet::print_summary_info() { 1439 Log(gc, remset, exit) log; 1440 if (log.is_trace()) { 1441 log.trace(" Cumulative RS summary"); 1442 G1RemSetSummary current; 1443 ResourceMark rm; 1444 LogStream ls(log.trace()); 1445 current.print_on(&ls); 1446 } 1447 } 1448 1449 class G1RebuildRemSetTask: public AbstractGangTask { 1450 // Aggregate the counting data that was constructed concurrently 1451 // with marking. 1452 class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure { 1453 G1ConcurrentMark* _cm; 1454 G1RebuildRemSetClosure _update_cl; 1455 1456 // Applies _update_cl to the references of the given object, limiting objArrays 1457 // to the given MemRegion. Returns the amount of words actually scanned. 1458 size_t scan_for_references(oop const obj, MemRegion mr) { 1459 size_t const obj_size = obj->size(); 1460 // All non-objArrays and objArrays completely within the mr 1461 // can be scanned without passing the mr. 1462 if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) { 1463 obj->oop_iterate(&_update_cl); 1464 return obj_size; 1465 } 1466 // This path is for objArrays crossing the given MemRegion. Only scan the 1467 // area within the MemRegion. 1468 obj->oop_iterate(&_update_cl, mr); 1469 return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size(); 1470 } 1471 1472 // A humongous object is live (with respect to the scanning) either 1473 // a) it is marked on the bitmap as such 1474 // b) its TARS is larger than TAMS, i.e. has been allocated during marking. 1475 bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const { 1476 return bitmap->is_marked(humongous_obj) || (tars > tams); 1477 } 1478 1479 // Iterator over the live objects within the given MemRegion. 
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use the bitmap
          // immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes between bottom and TAMS.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
          // two areas will be zero sized. I.e.
          // TAMS is either the same as bottom or top(_at_rebuild_start); there is no way
          // TAMS can have a different value, as that would mean TAMS points somewhere into
          // the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We cannot just use the region type because there
      // might already have been new allocations into these regions.
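      // A NULL TARS below means the region has been reclaimed in the meantime; in that
      // case the consistency check against marked_bytes() is skipped. Otherwise the
      // chunked scan above must have accounted for exactly the live data below TAMS.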
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _hr_claimer(n_workers),
    _cm(cm),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}
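// Usage sketch (illustrative; the exact call site lives in the concurrent marking
// code, not in this file): the remembered set rebuild is driven from concurrent
// marking, which hands its worker gang to rebuild_rem_set(), e.g.
//
//   g1h->rem_set()->rebuild_rem_set(cm, workers, worker_id_offset);
//
// Each worker then claims regions through the HeapRegionClaimer above and rebuilds
// their remembered sets in G1RebuildRemSetChunkSize-sized chunks, yielding to
// safepoints between chunks.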