/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

const char* HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};

class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  CHeapBitMap     _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* volatile _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, mtGC),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

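  // Sets the bit for the given card. Note that in the parallel case the
  // bitmap update and the _occupied increment are two separate atomic
  // operations, so a concurrent reader may transiently observe an _occupied
  // count that lags the number of set bits; callers treat occupied() as an
  // estimate (e.g. for fine-table eviction), so this slack is tolerated.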
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
      CardIdx_t from_card = OtherRegionsTable::card_within_region(from, loc_hr);
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    G1CardTable::card_size);
    return _bm.at(card_ind);
  }

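  // The global free list is maintained as a lock-free LIFO stack: bulk_free()
  // and alloc() below each loop on a compare-and-exchange of the _free_list
  // head until it succeeds (the classic Treiber-stack pattern).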
  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collision list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* volatile PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(), mtGC),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always prepend to the front of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // This is the first element we insert. Adjust the "last" pointer.
Adjust the "last" pointer 287 _last_all_fine_prts = prt; 288 assert(prt->next() == NULL, "just checking"); 289 } 290 // the new element is always the first element without a predecessor 291 prt->set_prev(NULL); 292 _first_all_fine_prts = prt; 293 294 assert(prt->prev() == NULL, "just checking"); 295 assert(_first_all_fine_prts == prt, "just checking"); 296 assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) || 297 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL), 298 "just checking"); 299 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL, 300 "just checking"); 301 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL, 302 "just checking"); 303 } 304 305 void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) { 306 if (prt->prev() != NULL) { 307 assert(_first_all_fine_prts != prt, "just checking"); 308 prt->prev()->set_next(prt->next()); 309 // removing the last element in the list? 310 if (_last_all_fine_prts == prt) { 311 _last_all_fine_prts = prt->prev(); 312 } 313 } else { 314 assert(_first_all_fine_prts == prt, "just checking"); 315 _first_all_fine_prts = prt->next(); 316 // list is empty now? 317 if (_first_all_fine_prts == NULL) { 318 _last_all_fine_prts = NULL; 319 } 320 } 321 322 if (prt->next() != NULL) { 323 prt->next()->set_prev(prt->prev()); 324 } 325 326 prt->set_next(NULL); 327 prt->set_prev(NULL); 328 329 assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) || 330 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL), 331 "just checking"); 332 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL, 333 "just checking"); 334 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL, 335 "just checking"); 336 } 337 338 CardIdx_t OtherRegionsTable::card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr) { 339 assert(hr->is_in_reserved(within_region), 340 "HeapWord " PTR_FORMAT " is outside of region %u [" PTR_FORMAT ", " PTR_FORMAT ")", 341 p2i(within_region), hr->hrm_index(), p2i(hr->bottom()), p2i(hr->end())); 342 CardIdx_t result = (CardIdx_t)(pointer_delta((HeapWord*)within_region, hr->bottom()) >> (CardTable::card_shift - LogHeapWordSize)); 343 return result; 344 } 345 346 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) { 347 uint cur_hrm_ind = _hr->hrm_index(); 348 349 uintptr_t from_card = uintptr_t(from) >> CardTable::card_shift; 350 351 if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) { 352 assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from)); 353 return; 354 } 355 356 // Note that this may be a continued H region. 357 HeapRegion* from_hr = _g1h->heap_region_containing(from); 358 RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index(); 359 360 // If the region is already coarsened, return. 361 if (_coarse_map.at(from_hrm_ind)) { 362 assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from)); 363 return; 364 } 365 366 // Otherwise find a per-region table to add it to. 367 size_t ind = from_hrm_ind & _mod_max_fine_entries_mask; 368 PerRegionTable* prt = find_region_table(ind, from_hr); 369 if (prt == NULL) { 370 MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); 371 // Confirm that it's really not there... 
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      CardIdx_t card_index = card_within_region(from, from_hr);

      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          prt->add_card(c);
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT (%d)", p2i(from), prt->contains_reference(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
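    // (The fine table is completely full at this point, per the precondition
    // above, so the forward scan must hit a non-NULL bucket before wrapping
    // back around to i.)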
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit. The alternative, comparing against occupied(), would be too slow
    // at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

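// A coarsened region contributes all of its cards to the occupancy count,
// whether or not they actually contain interesting references: coarsening
// trades precision for a bound on remembered set size.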
size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  if (_n_coarse_entries > 0) {
    _coarse_map.clear();
  }
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    CardIdx_t card_index = card_within_region(from, hr);
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(),
    _state(Untracked),
    _other_regions(hr, &_m) {
}

void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain table sizes.
  // table_size = base * (log(region_size / 1M) + 1)
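  // For example: with 8M regions, LogOfHRGrainBytes is 23, so
  // region_size_log_mb is 3 and each table gets 4x its base entry count.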
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear(bool only_cardset) {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked(only_cardset);
}

void HeapRegionRemSet::clear_locked(bool only_cardset) {
  if (!only_cardset) {
    _code_roots.clear();
  }
  _other_regions.clear();
  set_state_empty();
  assert(occupied_locked() == 0, "Should be clear.");
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint, the per-hrrs lock must be held during modifications,
// except when doing a full GC.
// When not at a safepoint, the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         "should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() &&
           (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
         BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

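  // Per the locking scheme above: if the caller already holds the
  // CodeCache_lock, that alone serializes modifications, so no further lock
  // is taken; otherwise we are at a safepoint and take the per-region lock.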
  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_card_in_prt with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

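// has_next() yields cards in a fixed phase order: all sparse entries first,
// then the fine-grained PRTs, then the coarsened regions. The switch below
// relies on deliberate fall-through to advance from one phase to the next.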
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  return false;
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
    tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif