1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1BarrierSet.hpp" 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 28 #include "gc/g1/g1CardTable.inline.hpp" 29 #include "gc/g1/g1CardTableEntryClosure.hpp" 30 #include "gc/g1/g1CollectedHeap.inline.hpp" 31 #include "gc/g1/g1ConcurrentRefine.hpp" 32 #include "gc/g1/g1DirtyCardQueue.hpp" 33 #include "gc/g1/g1FromCardCache.hpp" 34 #include "gc/g1/g1GCPhaseTimes.hpp" 35 #include "gc/g1/g1HotCardCache.hpp" 36 #include "gc/g1/g1OopClosures.inline.hpp" 37 #include "gc/g1/g1RootClosures.hpp" 38 #include "gc/g1/g1RemSet.hpp" 39 #include "gc/g1/g1SharedDirtyCardQueue.hpp" 40 #include "gc/g1/heapRegion.inline.hpp" 41 #include "gc/g1/heapRegionManager.inline.hpp" 42 #include "gc/g1/heapRegionRemSet.inline.hpp" 43 #include "gc/g1/sparsePRT.hpp" 44 #include "gc/shared/gcTraceTime.inline.hpp" 45 #include "gc/shared/ptrQueue.hpp" 46 #include "gc/shared/suspendibleThreadSet.hpp" 47 #include "jfr/jfrEvents.hpp" 48 #include "memory/iterator.hpp" 49 #include "memory/resourceArea.hpp" 50 #include "oops/access.inline.hpp" 51 #include "oops/oop.inline.hpp" 52 #include "runtime/atomic.hpp" 53 #include "runtime/os.hpp" 54 #include "utilities/align.hpp" 55 #include "utilities/globalDefinitions.hpp" 56 #include "utilities/stack.inline.hpp" 57 #include "utilities/ticks.hpp" 58 59 // Collects information about the overall heap root scan progress during an evacuation. 60 // 61 // Scanning the remembered sets works by first merging all sources of cards to be 62 // scanned (log buffers, hcc, remembered sets) into a single data structure to remove 63 // duplicates and simplify work distribution. 64 // 65 // During the following card scanning we not only scan this combined set of cards, but 66 // also remember that these were completely scanned. The following evacuation passes 67 // do not scan these cards again, and so need to be preserved across increments. 
// The representation for all the cards to scan is the card table: cards can have
// one of three states during GC:
// - clean: these cards will not be scanned in this pass
// - dirty: these cards will be scanned in this pass
// - scanned: these cards have already been scanned in a previous pass
//
// After all evacuation is done, we reset the card table to clean.
//
// Work distribution occurs on "chunk" basis, i.e. contiguous ranges of cards. As an
// additional optimization, during card merging we remember which regions and which
// chunks actually contain cards to be scanned. Threads iterate only across these
// regions, and only compete for chunks containing any cards.
//
// Within these chunks, a worker scans the card table on "blocks" of cards, i.e.
// contiguous ranges of dirty cards to be scanned. These blocks are converted to actual
// memory ranges and then passed on to actual scanning.
class G1RemSetScanState : public CHeapObj<mtGC> {
  class G1DirtyRegions;

  size_t _max_regions;

  // Has this region that is part of the regions in the collection set been processed yet.
  typedef bool G1RemsetIterState;

  G1RemsetIterState volatile* _collection_set_iter_state;

  // Card table iteration claim for each heap region, from 0 (completely unscanned)
  // to (>=) HeapRegion::CardsPerRegion (completely scanned).
  uint volatile* _card_table_scan_state;

  // Return "optimal" number of chunks per region we want to use for claiming areas
  // within a region to claim. Dependent on the region size as proxy for the heap
  // size, we limit the total number of chunks to limit memory usage and maintenance
  // effort of that table vs. granularity of distributing scanning work.
  // Testing showed that 8 for 1M/2M region, 16 for 4M/8M regions, 32 for 16/32M regions
  // seems to be such a good trade-off.
  static uint get_chunks_per_region(uint log_region_size) {
    // Limit the expected input values to current known possible values of the
    // (log) region size. Adjust as necessary after testing if changing the permissible
    // values for region size.
    assert(log_region_size >= 20 && log_region_size <= 25,
           "expected value in [20,25], but got %u", log_region_size);
    return 1u << (log_region_size / 2 - 7);
  }

  uint _scan_chunks_per_region;         // Number of chunks per region.
  uint8_t _log_scan_chunks_per_region;  // Log of number of chunks per region.
  bool* _region_scan_chunks;
  size_t _num_total_scan_chunks;        // Total number of elements in _region_scan_chunks.
  uint8_t _scan_chunks_shift;           // For conversion between card index and chunk index.
public:
  // Number of cards covered by a single scan chunk.
  uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; }

  // Returns whether the chunk corresponding to the given region/card in region contain a
  // dirty card, i.e. actually needs scanning.
  bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const {
    size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift);
    assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           idx, _num_total_scan_chunks);
    return _region_scan_chunks[idx];
  }

private:
  // The complete set of regions which card table needs to be cleared at the end of GC because
  // we scribbled all over them.
  G1DirtyRegions* _all_dirty_regions;
  // The set of regions which card table needs to be scanned for new dirty cards
  // in the current evacuation pass.
  G1DirtyRegions* _next_dirty_regions;

  // Set of (unique) regions that can be added to concurrently.
  class G1DirtyRegions : public CHeapObj<mtGC> {
    uint* _buffer;        // Dense array of the region indices added so far.
    uint _cur_idx;        // Number of valid entries in _buffer.
    size_t _max_regions;

    bool* _contains;      // Per-region flag for O(1) duplicate filtering.

  public:
    G1DirtyRegions(size_t max_regions) :
      _buffer(NEW_C_HEAP_ARRAY(uint, max_regions, mtGC)),
      _cur_idx(0),
      _max_regions(max_regions),
      _contains(NEW_C_HEAP_ARRAY(bool, max_regions, mtGC)) {

      reset();
    }

    static size_t chunk_size() { return M; }

    ~G1DirtyRegions() {
      FREE_C_HEAP_ARRAY(uint, _buffer);
      FREE_C_HEAP_ARRAY(bool, _contains);
    }

    // Empty the set; existing buffer memory is reused.
    void reset() {
      _cur_idx = 0;
      ::memset(_contains, false, _max_regions * sizeof(bool));
    }

    uint size() const { return _cur_idx; }

    uint at(uint idx) const {
      assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
      return _buffer[idx];
    }

    // Add the given region to the set unless already present. Safe for
    // concurrent callers: the cmpxchg on _contains elects exactly one
    // winner, which then claims a buffer slot via atomic increment.
    void add_dirty_region(uint region) {
      if (_contains[region]) {
        return;
      }

      bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
      if (marked_as_dirty) {
        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
        _buffer[allocated] = region;
      }
    }

    // Creates the union of this and the other G1DirtyRegions.
    // NOTE(review): unlike add_dirty_region() this is not atomic; presumably
    // only called when no concurrent additions occur.
    void merge(const G1DirtyRegions* other) {
      for (uint i = 0; i < other->size(); i++) {
        uint region = other->at(i);
        if (!_contains[region]) {
          _buffer[_cur_idx++] = region;
          _contains[region] = true;
        }
      }
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;

  // Parallel task clearing the card table of the regions recorded as dirty.
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    G1DirtyRegions* _regions;
    uint _chunk_length;               // Number of regions claimed per atomic step.

    uint volatile _cur_dirty_regions; // Claim index into _regions.

    G1RemSetScanState* _scan_state;

  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         G1DirtyRegions* regions,
                         uint chunk_length,
                         G1RemSetScanState* scan_state) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _regions(regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0),
      _scan_state(scan_state) {

      assert(chunk_length > 0, "must be");
    }

    static uint chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _regions->size()) {
        // Claim a contiguous range of dirty regions to clear.
        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
        uint max = MIN2(next + _chunk_length, _regions->size());

        for (uint i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_regions->at(i));
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    uint num_regions = _all_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    uint const num_chunks = (uint)(align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size());
    uint const num_workers = MIN2(num_chunks, workers->active_workers());
    uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion;

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this);

    log_debug(gc, ergo)("Running %s using %u workers for %u "
                        "units of work for %u regions.",
                        cl.name(), num_workers, num_chunks, num_regions);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }

public:
  G1RemSetScanState() :
    _max_regions(0),
    _collection_set_iter_state(NULL),
    _card_table_scan_state(NULL),
    _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)),
    _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)),
    _region_scan_chunks(NULL),
    _num_total_scan_chunks(0),
    _scan_chunks_shift(0),
    _all_dirty_regions(NULL),
    _next_dirty_regions(NULL),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state);
    FREE_C_HEAP_ARRAY(uint, _card_table_scan_state);
    FREE_C_HEAP_ARRAY(bool, _region_scan_chunks);
    FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
  }

  // One-time allocation of the per-region bookkeeping arrays.
  void initialize(size_t max_regions) {
    assert(_collection_set_iter_state == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _num_total_scan_chunks = max_regions * _scan_chunks_per_region;
    _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);

    _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  // Per-GC setup of claim state and dirty region sets.
  void prepare() {
    // Reset the claim and clear scan top for all regions, including
    // regions currently not available or free. Since regions might
    // become used during the collection these values must be valid
    // for those regions as well.
    for (size_t i = 0; i < _max_regions; i++) {
      reset_region_claim((uint)i);
      clear_scan_top((uint)i);
    }

    _all_dirty_regions = new G1DirtyRegions(_max_regions);
    _next_dirty_regions = new G1DirtyRegions(_max_regions);
  }

  // Reset scan state before merging the heap root sources of the next
  // evacuation pass; regions dirtied so far are folded into _all_dirty_regions
  // so their card table is cleared at the end of GC.
  void prepare_for_merge_heap_roots() {
    _all_dirty_regions->merge(_next_dirty_regions);

    _next_dirty_regions->reset();
    for (size_t i = 0; i < _max_regions; i++) {
      _card_table_scan_state[i] = 0;
    }

    ::memset(_region_scan_chunks, false, _num_total_scan_chunks * sizeof(*_region_scan_chunks));
  }

  // Returns whether the given region contains cards we need to scan. The remembered
  // set and other sources may contain cards that
  // - are in uncommitted regions
  // - are located in the collection set
  // - are located in free regions
  // as we do not clean up remembered sets before merging heap roots.
  bool contains_cards_to_process(uint const region_idx) const {
    HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
    return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive());
  }

  // Upper bound on the number of cards to visit, derived from the number of
  // dirty chunks times the (fixed) chunk size in cards.
  size_t num_visited_cards() const {
    size_t result = 0;
    for (uint i = 0; i < _num_total_scan_chunks; i++) {
      if (_region_scan_chunks[i]) {
        result++;
      }
    }
    return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region);
  }

  size_t num_cards_in_dirty_regions() const {
    return _next_dirty_regions->size() * HeapRegion::CardsPerRegion;
  }

  // Mark every chunk of the region containing the given card index as dirty.
  void set_chunk_region_dirty(size_t const region_card_idx) {
    size_t chunk_idx = region_card_idx >> _scan_chunks_shift;
    for (uint i = 0; i < _scan_chunks_per_region; i++) {
      _region_scan_chunks[chunk_idx++] = true;
    }
  }

  // Mark the single chunk covering the given (global) card index as dirty.
  void set_chunk_dirty(size_t const card_idx) {
    assert((card_idx >> _scan_chunks_shift) < _num_total_scan_chunks,
           "Trying to access index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           card_idx >> _scan_chunks_shift, _num_total_scan_chunks);
    size_t const chunk_idx = card_idx >> _scan_chunks_shift;
    if (!_region_scan_chunks[chunk_idx]) {
      _region_scan_chunks[chunk_idx] = true;
    }
  }

  // End-of-GC cleanup: clear the card table of all dirtied regions and
  // release the per-GC dirty region sets.
  void cleanup(WorkGang* workers) {
    _all_dirty_regions->merge(_next_dirty_regions);

    clear_card_table(workers);

    delete _all_dirty_regions;
    _all_dirty_regions = NULL;

    delete _next_dirty_regions;
    _next_dirty_regions = NULL;
  }

  // Apply cl to every dirty region, starting each worker at a different
  // offset to spread contention, wrapping around the region list.
  void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) {
    uint num_regions = _next_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    WorkGang* workers = g1h->workers();
    uint const max_workers = workers->active_workers();

    uint const start_pos = num_regions * worker_id / max_workers;
    uint cur = start_pos;

    do {
      bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur)));
      guarantee(!result, "Not allowed to ask for early termination.");
      cur++;
      if (cur == _next_dirty_regions->size()) {
        cur = 0;
      }
    } while (cur != start_pos);
  }

  void reset_region_claim(uint region_idx) {
    _collection_set_iter_state[region_idx] = false;
  }

  // Attempt to claim the given region in the collection set for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_collection_set_region(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_collection_set_iter_state[region]) {
      return false;
    }
    return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
  }

  bool has_cards_to_scan(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
  }

  // Atomically claim the next `increment` cards of the given region;
  // returns the first card index of the claimed range.
  uint claim_cards_to_scan(uint region, uint increment) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
  }

  void add_dirty_region(uint const region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
           "Region %u is not suitable for scanning, is %sin collection set or %s",
           hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
    _next_dirty_regions->add_dirty_region(region);
  }

  void add_all_dirty_region(uint region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(hr->in_collection_set(),
           "Only add young regions to all dirty regions directly but %u is %s",
           hr->hrm_index(), hr->get_short_type_str());
#endif
    _all_dirty_regions->add_dirty_region(region);
  }

  void set_scan_top(uint region_idx, HeapWord* value) {
    _scan_top[region_idx] = value;
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  void clear_scan_top(uint region_idx) {
    set_scan_top(region_idx, NULL);
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(false),
  _g1h(g1h),
  _ct(ct),
  _g1p(_g1h->policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  delete _scan_state;
}

// Maximum number of parties that may concurrently add to remembered sets.
uint G1RemSet::num_par_rem_sets() {
  return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

// Helper class to scan and detect ranges of cards that need to be scanned on the
// card table.
class G1CardTableScanner : public StackObj {
public:
  typedef CardTable::CardValue CardValue;

private:
  CardValue* const _base_addr;  // Start of the card range being scanned.

  CardValue* _cur_addr;         // Current scan position.
  CardValue* const _end_addr;   // One past the last card of the range.

  // Bit pattern distinguishing already-scanned from dirty cards, for a
  // single card and expanded to a full machine word of cards.
  static const size_t ToScanMask = G1CardTable::g1_card_already_scanned;
  static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned;

  bool cur_addr_aligned() const {
    return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0;
  }

  bool cur_card_is_dirty() const {
    CardValue value = *_cur_addr;
    return (value & ToScanMask) == 0;
  }

  // Check sizeof(size_t) cards at once by reading them as one word.
  bool cur_word_of_cards_contains_any_dirty_card() const {
    assert(cur_addr_aligned(), "Current address should be aligned");
    size_t const value = *(size_t*)_cur_addr;
    return (~value & ExpandedToScanMask) != 0;
  }

  bool cur_word_of_cards_all_dirty_cards() const {
    size_t const value = *(size_t*)_cur_addr;
    return value == G1CardTable::WordAllDirty;
  }

  // Advance one card and return the (zero-based) index of the card we
  // were at before advancing.
  size_t get_and_advance_pos() {
    _cur_addr++;
    return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1;
  }

public:
  G1CardTableScanner(CardValue* start_card, size_t size) :
    _base_addr(start_card),
    _cur_addr(start_card),
    _end_addr(start_card + size) {

    assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card));
    assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size);
  }

  // Returns the index of the next dirty card at or after the current
  // position, or the size of the range if there is none. Scans card by
  // card until word-aligned, then a whole word of cards at a time.
  size_t find_next_dirty() {
    while (!cur_addr_aligned()) {
      if (cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (cur_word_of_cards_contains_any_dirty_card()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }

  // Returns the index of the next non-dirty card at or after the current
  // position, i.e. the exclusive end of the current run of dirty cards.
  size_t find_next_non_dirty() {
    assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area.");

    while (!cur_addr_aligned()) {
      if (!cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (!cur_word_of_cards_all_dirty_cards()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (!cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a non-dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }
};

// Helper class to claim dirty chunks within the card table.
class G1CardTableChunkClaimer {
  G1RemSetScanState* _scan_state;
  uint _region_idx;
  uint _cur_claim;  // First card index of the most recently claimed chunk.

public:
  G1CardTableChunkClaimer(G1RemSetScanState* scan_state, uint region_idx) :
    _scan_state(scan_state),
    _region_idx(region_idx),
    _cur_claim(0) {
    guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
  }

  // Claim chunks until one that actually needs scanning is found, or the
  // region is exhausted. Returns whether a scannable chunk was claimed.
  bool has_next() {
    while (true) {
      _cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size());
      if (_cur_claim >= HeapRegion::CardsPerRegion) {
        return false;
      }
      if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) {
        return true;
      }
    }
  }

  uint value() const { return _cur_claim; }
  uint size() const { return _scan_state->scan_chunk_size(); }
};

// Scans a heap region for dirty cards.
624 class G1ScanHRForRegionClosure : public HeapRegionClosure { 625 G1CollectedHeap* _g1h; 626 G1CardTable* _ct; 627 G1BlockOffsetTable* _bot; 628 629 G1ParScanThreadState* _pss; 630 631 G1RemSetScanState* _scan_state; 632 633 G1GCPhaseTimes::GCParPhases _phase; 634 635 uint _worker_id; 636 637 size_t _cards_scanned; 638 size_t _blocks_scanned; 639 size_t _chunks_claimed; 640 641 Tickspan _rem_set_root_scan_time; 642 Tickspan _rem_set_trim_partially_time; 643 644 // The address to which this thread already scanned (walked the heap) up to during 645 // card scanning (exclusive). 646 HeapWord* _scanned_to; 647 648 HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) { 649 HeapRegion* const card_region = _g1h->region_at(region_idx_for_card); 650 G1ScanCardClosure card_cl(_g1h, _pss); 651 652 HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl); 653 assert(scanned_to != NULL, "Should be able to scan range"); 654 assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end())); 655 656 _pss->trim_queue_partially(); 657 return scanned_to; 658 } 659 660 void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) { 661 HeapWord* const card_start = _bot->address_for_index_raw(first_card); 662 #ifdef ASSERT 663 HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card); 664 assert(hr == NULL || hr->is_in_reserved(card_start), 665 "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index()); 666 #endif 667 HeapWord* const top = _scan_state->scan_top(region_idx_for_card); 668 if (card_start >= top) { 669 return; 670 } 671 672 HeapWord* scan_end = MIN2(card_start + (num_cards << BOTConstants::LogN_words), top); 673 if (_scanned_to >= scan_end) { 674 return; 675 } 676 MemRegion mr(MAX2(card_start, _scanned_to), scan_end); 677 _scanned_to = 
scan_memregion(region_idx_for_card, mr); 678 679 _cards_scanned += num_cards; 680 } 681 682 ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) { 683 _ct->mark_as_scanned(first_card, num_cards); 684 do_claimed_block(region_idx, first_card, num_cards); 685 _blocks_scanned++; 686 } 687 688 void scan_heap_roots(HeapRegion* r) { 689 EventGCPhaseParallel event; 690 uint const region_idx = r->hrm_index(); 691 692 ResourceMark rm; 693 694 G1CardTableChunkClaimer claim(_scan_state, region_idx); 695 696 // Set the current scan "finger" to NULL for every heap region to scan. Since 697 // the claim value is monotonically increasing, the check to not scan below this 698 // will filter out objects spanning chunks within the region too then, as opposed 699 // to resetting this value for every claim. 700 _scanned_to = NULL; 701 702 while (claim.has_next()) { 703 size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value(); 704 CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx); 705 706 G1CardTableScanner scan(base_addr, claim.size()); 707 708 size_t first_scan_idx = scan.find_next_dirty(); 709 while (first_scan_idx != claim.size()) { 710 assert(*_ct->byte_for_index(region_card_base_idx + first_scan_idx) <= 0x1, "is %d at region %u idx " SIZE_FORMAT, *_ct->byte_for_index(region_card_base_idx + first_scan_idx), region_idx, first_scan_idx); 711 712 size_t const last_scan_idx = scan.find_next_non_dirty(); 713 size_t const len = last_scan_idx - first_scan_idx; 714 715 do_card_block(region_idx, region_card_base_idx + first_scan_idx, len); 716 717 if (last_scan_idx == claim.size()) { 718 break; 719 } 720 721 first_scan_idx = scan.find_next_dirty(); 722 } 723 _chunks_claimed++; 724 } 725 726 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanHR)); 727 } 728 729 public: 730 G1ScanHRForRegionClosure(G1RemSetScanState* 
scan_state, 731 G1ParScanThreadState* pss, 732 uint worker_id, 733 G1GCPhaseTimes::GCParPhases phase) : 734 _g1h(G1CollectedHeap::heap()), 735 _ct(_g1h->card_table()), 736 _bot(_g1h->bot()), 737 _pss(pss), 738 _scan_state(scan_state), 739 _phase(phase), 740 _worker_id(worker_id), 741 _cards_scanned(0), 742 _blocks_scanned(0), 743 _chunks_claimed(0), 744 _rem_set_root_scan_time(), 745 _rem_set_trim_partially_time(), 746 _scanned_to(NULL) { 747 } 748 749 bool do_heap_region(HeapRegion* r) { 750 assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(), 751 "Should only be called on old gen non-collection set regions but region %u is not.", 752 r->hrm_index()); 753 uint const region_idx = r->hrm_index(); 754 755 if (_scan_state->has_cards_to_scan(region_idx)) { 756 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time); 757 scan_heap_roots(r); 758 } 759 return false; 760 } 761 762 Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; } 763 Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; } 764 765 size_t cards_scanned() const { return _cards_scanned; } 766 size_t blocks_scanned() const { return _blocks_scanned; } 767 size_t chunks_claimed() const { return _chunks_claimed; } 768 }; 769 770 void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss, 771 uint worker_id, 772 G1GCPhaseTimes::GCParPhases scan_phase, 773 G1GCPhaseTimes::GCParPhases objcopy_phase) { 774 G1ScanHRForRegionClosure cl(_scan_state, pss, worker_id, scan_phase); 775 _scan_state->iterate_dirty_regions_from(&cl, worker_id); 776 777 G1GCPhaseTimes* p = _g1p->phase_times(); 778 779 p->record_or_add_time_secs(objcopy_phase, worker_id, cl.rem_set_trim_partially_time().seconds()); 780 781 p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_root_scan_time().seconds()); 782 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.cards_scanned(), G1GCPhaseTimes::ScanHRScannedCards); 783 
p->record_or_add_thread_work_item(scan_phase, worker_id, cl.blocks_scanned(), G1GCPhaseTimes::ScanHRScannedBlocks); 784 p->record_or_add_thread_work_item(scan_phase, worker_id, cl.chunks_claimed(), G1GCPhaseTimes::ScanHRClaimedChunks); 785 } 786 787 // Heap region closure to be applied to all regions in the current collection set 788 // increment to fix up non-card related roots. 789 class G1ScanCollectionSetRegionClosure : public HeapRegionClosure { 790 G1ParScanThreadState* _pss; 791 G1RemSetScanState* _scan_state; 792 793 G1GCPhaseTimes::GCParPhases _scan_phase; 794 G1GCPhaseTimes::GCParPhases _code_roots_phase; 795 796 uint _worker_id; 797 798 size_t _opt_refs_scanned; 799 size_t _opt_refs_memory_used; 800 801 Tickspan _strong_code_root_scan_time; 802 Tickspan _strong_code_trim_partially_time; 803 804 Tickspan _rem_set_opt_root_scan_time; 805 Tickspan _rem_set_opt_trim_partially_time; 806 807 void scan_opt_rem_set_roots(HeapRegion* r) { 808 EventGCPhaseParallel event; 809 810 G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r); 811 812 G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss); 813 G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl); 814 _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops()); 815 _opt_refs_memory_used += opt_rem_set_list->used_memory(); 816 817 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase)); 818 } 819 820 public: 821 G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state, 822 G1ParScanThreadState* pss, 823 uint worker_id, 824 G1GCPhaseTimes::GCParPhases scan_phase, 825 G1GCPhaseTimes::GCParPhases code_roots_phase) : 826 _pss(pss), 827 _scan_state(scan_state), 828 _scan_phase(scan_phase), 829 _code_roots_phase(code_roots_phase), 830 _worker_id(worker_id), 831 _opt_refs_scanned(0), 832 _opt_refs_memory_used(0), 833 _strong_code_root_scan_time(), 834 _strong_code_trim_partially_time(), 835 _rem_set_opt_root_scan_time(), 836 
_rem_set_opt_trim_partially_time() { } 837 838 bool do_heap_region(HeapRegion* r) { 839 uint const region_idx = r->hrm_index(); 840 841 // The individual references for the optional remembered set are per-worker, so we 842 // always need to scan them. 843 if (r->has_index_in_opt_cset()) { 844 G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time); 845 scan_opt_rem_set_roots(r); 846 } 847 848 if (_scan_state->claim_collection_set_region(region_idx)) { 849 EventGCPhaseParallel event; 850 851 G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time); 852 // Scan the strong code root list attached to the current region 853 r->strong_code_roots_do(_pss->closures()->weak_codeblobs()); 854 855 event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase)); 856 } 857 858 return false; 859 } 860 861 Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; } 862 Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; } 863 864 Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; } 865 Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; } 866 867 size_t opt_refs_scanned() const { return _opt_refs_scanned; } 868 size_t opt_refs_memory_used() const { return _opt_refs_memory_used; } 869 }; 870 871 void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss, 872 uint worker_id, 873 G1GCPhaseTimes::GCParPhases scan_phase, 874 G1GCPhaseTimes::GCParPhases coderoots_phase, 875 G1GCPhaseTimes::GCParPhases objcopy_phase) { 876 G1ScanCollectionSetRegionClosure cl(_scan_state, pss, worker_id, scan_phase, coderoots_phase); 877 _g1h->collection_set_iterate_increment_from(&cl, worker_id); 878 879 G1GCPhaseTimes* p = _g1h->phase_times(); 880 881 p->record_or_add_time_secs(scan_phase, worker_id, 
cl.rem_set_opt_root_scan_time().seconds());
  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds());

  p->record_or_add_time_secs(coderoots_phase, worker_id, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(objcopy_phase, worker_id, cl.strong_code_root_trim_partially_time().seconds());

  // At this time we record some metrics only for the evacuations after the initial one.
  if (scan_phase == G1GCPhaseTimes::OptScanHR) {
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanHRScannedOptRefs);
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

// Record the scan state (scan top / dirty region membership) for the given region
// ahead of the heap root scan.
void G1RemSet::prepare_region_for_scan(HeapRegion* region) {
  uint hrm_index = region->hrm_index();

  if (region->in_collection_set()) {
    // Young regions had their card table marked as young at their allocation;
    // we need to make sure that these marks are cleared at the end of GC, *but*
    // they should not be scanned for cards.
    // So directly add them to the "all_dirty_regions".
    // Same for regions in the (initial) collection set: they may contain cards from
    // the log buffers, make sure they are cleaned.
    _scan_state->add_all_dirty_region(hrm_index);
  } else if (region->is_old_or_humongous_or_archive()) {
    // Only old/humongous/archive regions can contain cards worth scanning;
    // remember the current top as the scan limit.
    _scan_state->set_scan_top(hrm_index, region->top());
  } else {
    assert(region->is_free(), "Should only be free region at this point %s", region->get_type_str());
  }
}

// Flush all per-thread dirty card log buffers into the global set and set up the
// per-collection scan state.
void G1RemSet::prepare_for_scan_heap_roots() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->prepare();
}

// Task that merges all sources of cards to be scanned (remembered sets, hot card
// cache, log buffers) onto the card table, deduplicating them there.
class G1MergeHeapRootsTask : public AbstractGangTask {

  // Visitor for remembered sets, dropping entries onto the card table.
  class G1MergeCardSetClosure : public HeapRegionClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    // Number of remembered set entries merged, by PRT granularity.
    uint _merged_sparse;
    uint _merged_fine;
    uint _merged_coarse;

    // Number of cards actually newly dirtied on the card table.
    size_t _cards_dirty;

    // Returns if the region contains cards we need to scan. If so, remember that
    // region in the current set of dirty regions.
    bool remember_if_interesting(uint const region_idx) {
      if (!_scan_state->contains_cards_to_process(region_idx)) {
        return false;
      }
      _scan_state->add_dirty_region(region_idx);
      return true;
    }
  public:
    G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
      _scan_state(scan_state),
      _ct(G1CollectedHeap::heap()->card_table()),
      _merged_sparse(0),
      _merged_fine(0),
      _merged_coarse(0),
      _cards_dirty(0) { }

    // A coarse PRT entry covers a whole region: dirty every card of it.
    void next_coarse_prt(uint const region_idx) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_coarse++;

      size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      _cards_dirty += _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
      _scan_state->set_chunk_region_dirty(region_base_idx);
    }

    // A fine PRT entry carries a bitmap of cards within the region: dirty each set bit.
    void next_fine_prt(uint const region_idx, BitMap* bm) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_fine++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      BitMap::idx_t cur = bm->get_next_one_offset(0);
      while (cur != bm->size()) {
        _cards_dirty += _ct->mark_clean_as_dirty(region_base_idx + cur);
        _scan_state->set_chunk_dirty(region_base_idx + cur);
        cur = bm->get_next_one_offset(cur + 1);
      }
    }

    // A sparse PRT entry carries an explicit list of card indices within the region.
    void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_sparse++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      for (uint i = 0; i < num_cards; i++) {
        size_t card_idx = region_base_idx + cards[i];
        _cards_dirty += _ct->mark_clean_as_dirty(card_idx);
        _scan_state->set_chunk_dirty(card_idx);
      }
    }

    virtual bool do_heap_region(HeapRegion* r) {
      assert(r->in_collection_set() || r->is_starts_humongous(), "must be");

      HeapRegionRemSet* rem_set = r->rem_set();
      if (!rem_set->is_empty()) {
        rem_set->iterate_prts(*this);
      }

      return false;
    }

    size_t merged_sparse() const { return _merged_sparse; }
    size_t merged_fine() const { return _merged_fine; }
    size_t merged_coarse() const { return _merged_coarse; }

    size_t cards_dirty() const { return _cards_dirty; }
  };

  // Visitor for the remembered sets of humongous candidate regions to merge their
  // remembered set into the card table.
  class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
    G1MergeCardSetClosure _cl;

  public:
    G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();

      // Only humongous start regions that are (still) eager-reclaim candidates
      // with a non-empty remembered set are interesting here.
      if (!r->is_starts_humongous() ||
          !g1h->region_attr(r->hrm_index()).is_humongous() ||
          r->rem_set()->is_empty()) {
        return false;
      }

      guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                "Found a not-small remembered set here. This is inconsistent with previous assumptions.");

      _cl.do_heap_region(r);

      // We should only clear the card based remembered set here as we will not
      // implicitly rebuild anything else during eager reclaim. Note that at the moment
      // (and probably never) we do not enter this path if there are other kind of
      // remembered sets for this region.
      r->rem_set()->clear_locked(true /* only_cardset */);
      // Clear_locked() above sets the state to Empty. However we want to continue
      // collecting remembered set entries for humongous regions that were not
      // reclaimed.
      r->rem_set()->set_state_complete();
#ifdef ASSERT
      G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
      assert(region_attr.needs_remset_update(), "must be");
#endif
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");

      return false;
    }

    size_t merged_sparse() const { return _cl.merged_sparse(); }
    size_t merged_fine() const { return _cl.merged_fine(); }
    size_t merged_coarse() const { return _cl.merged_coarse(); }

    size_t cards_dirty() const { return _cl.cards_dirty(); }
  };

  // Visitor for the log buffer entries to merge them into the card table.
  class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    // Cards merged vs. cards skipped because their region needs no processing.
    size_t _cards_dirty;
    size_t _cards_skipped;
  public:
    G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
      _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
    {}

    void do_card_ptr(CardValue* card_ptr, uint worker_id) {
      // The only time we care about recording cards that
      // contain references that point into the collection set
      // is during RSet updating within an evacuation pause.
      // In this case worker_id should be the id of a GC worker thread.
      assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

      uint const region_idx = _ct->region_idx_for(card_ptr);

      // The second clause must come after - the log buffers might contain cards to uncommitted
      // regions.
      // This code may count duplicate entries in the log buffers (even if rare) multiple
      // times.
      if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
        _scan_state->add_dirty_region(region_idx);
        _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
        _cards_dirty++;
      } else {
        // We may have had dirty cards in the (initial) collection set (or the
        // young regions which are always in the initial collection set). We do
        // not fix their cards here: we already added these regions to the set of
        // regions to clear the card table at the end during the prepare() phase.
        _cards_skipped++;
      }
    }

    size_t cards_dirty() const { return _cards_dirty; }
    size_t cards_skipped() const { return _cards_skipped; }
  };

  HeapRegionClaimer _hr_claimer;
  G1RemSetScanState* _scan_state;
  // Dirty card log buffers taken over from the queue set at construction time.
  BufferNode::Stack _dirty_card_buffers;
  bool _initial_evacuation;

  // Set once (via CAS) by the single worker that flushes the humongous
  // candidate remembered sets.
  volatile bool _fast_reclaim_handled;

  // Drain all taken-over log buffers through the given closure, returning each
  // buffer to the queue set's allocator afterwards.
  void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    size_t buffer_size = dcqs.buffer_size();
    while (BufferNode* node = _dirty_card_buffers.pop()) {
      cl->apply_to_buffer(node, buffer_size, worker_id);
      dcqs.deallocate_buffer(node);
    }
  }

public:
  G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
    AbstractGangTask("G1 Merge Heap Roots"),
    _hr_claimer(num_workers),
    _scan_state(scan_state),
    _dirty_card_buffers(),
    _initial_evacuation(initial_evacuation),
    _fast_reclaim_handled(false)
  {
    if (initial_evacuation) {
      G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
      G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
      if (buffers._entry_count != 0) {
        _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
      }
    }
  }

  virtual void work(uint worker_id) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    G1GCPhaseTimes* p = g1h->phase_times();

    G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
                                                     G1GCPhaseTimes::MergeRS :
                                                     G1GCPhaseTimes::OptMergeRS;

    // We schedule flushing the remembered sets of humongous fast reclaim candidates
    // onto the card table first to allow the remaining parallelized tasks hide it.
    // Exactly one worker wins the CAS and does this work.
    if (_initial_evacuation &&
        p->fast_reclaim_humongous_candidates() > 0 &&
        !_fast_reclaim_handled &&
        !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {

      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);

      G1FlushHumongousCandidateRemSets cl(_scan_state);
      g1h->heap_region_iterate(&cl);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
    }

    // Merge remembered sets of current candidates.
    {
      G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
      G1MergeCardSetClosure cl(_scan_state);
      g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
    }

    // Apply closure to log entries in the HCC.
    if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      g1h->iterate_hcc_closure(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
    }

    // Now apply the closure to all remaining log entries.
    if (_initial_evacuation) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);

      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      apply_closure_to_dirty_card_buffers(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards);
    }
  }
};

// Log a summary of how many cards were merged versus the totals of the dirty
// regions and all old regions.
void G1RemSet::print_merge_heap_roots_stats() {
  size_t num_visited_cards = _scan_state->num_visited_cards();

  size_t total_dirty_region_cards = _scan_state->num_cards_in_dirty_regions();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t total_old_region_cards =
    (g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion;

  log_debug(gc,remset)("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)",
                       num_visited_cards,
                       total_dirty_region_cards,
                       percent_of(num_visited_cards, total_dirty_region_cards),
                       total_old_region_cards,
                       percent_of(num_visited_cards, total_old_region_cards));
}

// Run the merge-heap-roots task over the current collection set increment,
// recording preparation and merge timings.
void G1RemSet::merge_heap_roots(bool initial_evacuation) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  {
    Ticks start = Ticks::now();

    _scan_state->prepare_for_merge_heap_roots();

    Tickspan total = Ticks::now() - start;
    if (initial_evacuation) {
      g1h->phase_times()->record_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
    } else {
      g1h->phase_times()->record_or_add_optional_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
    }
  }

  WorkGang* workers = g1h->workers();
  size_t const increment_length =
g1h->collection_set()->increment_length();

  // For optional evacuations do not use more workers than regions in the increment.
  uint const num_workers = initial_evacuation ? workers->active_workers() :
                                                MIN2(workers->active_workers(), (uint)increment_length);

  {
    G1MergeHeapRootsTask cl(_scan_state, num_workers, initial_evacuation);
    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " regions",
                        cl.name(), num_workers, increment_length);
    workers->run_task(&cl, num_workers);
  }

  if (log_is_enabled(Debug, gc, remset)) {
    print_merge_heap_roots_stats();
  }
}

// Remove the given region from the set of regions to be scanned for roots.
void G1RemSet::exclude_region_from_scan(uint region_idx) {
  _scan_state->clear_scan_top(region_idx);
}

void G1RemSet::cleanup_after_scan_heap_roots() {
  G1GCPhaseTimes* phase_times = _g1h->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->cleanup(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

// Debug-only sanity check that the card really maps into the committed heap.
inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

// Decide whether the given card should be refined concurrently, filtering out
// stale/uninteresting cards, consulting the hot card cache (which may swap in a
// different, evicted card via *card_ptr_addr), and cleaning the card on success.
// Returns true if the (possibly replaced) card should now be refined.
bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  CardValue* card_ptr = *card_ptr_addr;
  // Find the start address represented by the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  // We cannot load the card value before the "r == NULL" check, because G1
  // could uncommit parts of the card table covering uncommitted regions.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const CardValue* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return false;
      }
      // Report the replacement card back to the caller.
      *card_ptr_addr = card_ptr;
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are either allocated in the old regions during GC,
  // or mapped in archive regions during startup. So if region is old or
  // archive then top is stable.
  // Humongous object allocation sets top last; if top has not yet been set,
  // this is a stale card and we'll end up with an empty intersection.
  // If this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return false;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();

  return true;
}

// Refine the (already cleaned) card by applying the concurrent refinement
// closure to the objects it covers; re-dirties and re-enqueues the card if the
// covered area was temporarily unparsable.
void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
                                        const uint worker_id) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");
  check_card_ptr(card_ptr, _ct);

  // Construct the MemRegion representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing(start);
  // This reload of the top is safe even though it happens after the full
  // fence, because top is stable for old, archive and unfiltered humongous
  // regions, so it must return the same value as the previous load when
  // cleaning the card. Also cleaning the card and refinement of the card
  // cannot span across safepoint, so we don't need to worry about top being
  // changed during safepoint.
  HeapWord* scan_limit = r->top();
  assert(scan_limit > start, "sanity");

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
  if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
    return;
  }

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object, so only
  // temporarily a problem) while processing a stale card. Despite
  // the card being stale, we can't simply ignore it, because we've
  // already marked the card cleaned, so taken responsibility for
  // ensuring the card gets scanned.
  //
  // However, the card might have gotten re-dirtied and re-enqueued
  // while we worked. (In fact, it's pretty likely.)
  if (*card_ptr == G1CardTable::dirty_card_val()) {
    return;
  }

  // Re-dirty the card and enqueue in the *shared* queue. Can't use
  // the thread-local queue, because that might be the queue that is
  // being processed by us; we could be a Java thread conscripted to
  // perform refinement on our queue's current buffer.
  *card_ptr = G1CardTable::dirty_card_val();
  G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr);
}

// Periodically (every G1SummarizeRSetStatsPeriod periods) log the delta of the
// remembered set summary since the previous period.
void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current;
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

// Log the cumulative remembered set summary at VM exit.
void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current;
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

class G1RebuildRemSetTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the amount of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) either
    // a) it is marked on the bitmap as such
    // b) its TARS is larger than TAMS, i.e. has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      // Objects at or above TAMS were allocated during marking and are
      // implicitly live; below TAMS the mark bitmap decides.
      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      // The bitmap is only authoritative below TAMS; cap lookups there.
      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use the bitmap
          // immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes between bottom and TAMS (the word count
    // scaled by HeapWordSize).
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
          // two areas will be zero sized. I.e. TAMS is either
          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
          // value: this would mean that TAMS points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          // Only count the part of the object below TAMS as marked.
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    // Rebuild the remembered set for the given region in chunks, yielding to
    // safepoints between chunks. Returns true to abort the whole iteration.
    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We can not just use region type because there
      // might have already been new allocations into these regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  // Offset added to worker_id so worker ids do not clash with other concurrent
  // mark worker ids.
  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _hr_claimer(n_workers),
    _cm(cm),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    // Join the suspendible thread set so this worker participates in safepoints.
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

// Rebuild the remembered sets concurrently with marking using the given workers.
void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}