/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion* _hr;
  BitMap      _bm;
  jint        _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
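    // Illustrative arithmetic (assuming the usual 512 byte cards and 8 byte
    // HeapWords, i.e. card_shift == 9 and LogHeapWordSize == 3): the word
    // offset computed below is shifted right by 9 - 3 == 6, so words 0..63
    // of the region fall into card 0, words 64..127 into card 1, and so on.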
    if (loc_hr->is_in_reserved(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return _hr; }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
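  // (Sketch of the allocation path below: it first tries to pop a table from
  // the global lock-free _free_list with a CAS retry loop and re-initializes
  // it via init(); only when the free list is empty is a new PerRegionTable
  // heap-allocated.)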
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_regions.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
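  // For illustration (with hypothetical tables A, B and C): linking C into
  // the list [A, B] below yields [C, A, B]; only the head pointer and the
  // prev/next links of C and A change, while _last_all_fine_prts is only
  // updated when the list was previously empty.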
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
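    // (Double-checked locking: another thread may have installed a PRT for
    // from_hr between the unlocked lookup above and acquiring _m, so the
    // lookup is repeated under the lock before creating a new table.)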
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
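    // (Buckets may be empty, so advance circularly until a non-empty one is
    // found; the guarantee below cannot fire because the precondition
    // _n_fine_entries == _max_fine_entries implies at least one non-empty
    // bucket.)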
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  log_develop_trace(gc, remset, scrub)(" Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  log_develop_trace(gc, remset, scrub)(" after = " SIZE_FORMAT ".", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      log_develop_trace(gc, remset, scrub)(" For other region %u:", cur->hr()->hrm_index());
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        log_develop_trace(gc, remset, scrub)(" deleted via region map.");
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        log_develop_trace(gc, remset, scrub)(" occ: before = %4d.", cur->occupied());
        cur->scrub(ctbs, card_bm);
        log_develop_trace(gc, remset, scrub)(" after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit.
    // The obvious solution of comparing against occupied() would be too slow
    // at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
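  // (The lookup below mirrors the storage hierarchy: the coarse region
  // bitmap is checked first, then the fine-grain PRT hash table, and
  // finally the sparse table.)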
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain table sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  // (For example, with 8M regions LogOfHRGrainBytes is 23, so
  // region_size_log_mb is 3 and the default entry counts scale by 4.)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}

bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start = _bot->address_for_index(card_index);
    tty->print_cr(" Card " PTR_FORMAT, p2i(card_start));
  }
  if (iter.n_yielded() != occupied()) {
    tty->print_cr("Yielded disagrees with occupied:");
    tty->print_cr(" " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    tty->print_cr(" " SIZE_FORMAT_W(6) " occ (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}

void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
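  // (The yielded value is a global card index: the block offset table index
  // of the coarse region's bottom plus the card's offset within that region.)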
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_card_in_prt with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
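  // Sketch of the expected outcome (assuming 1 MB regions, i.e. 2048 cards
  // per region): eleven references are added below, and forcing a coarsening
  // replaces one PRT holding three cards with a coarse entry covering a
  // whole region, which is why the iteration at the end is expected to
  // yield 11 - 3 + 2048 cards.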
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
    tty->print_cr(" Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif