/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/g1/sparsePRT.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

const char* HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};

PerRegionTable* PerRegionTable::alloc(HeapRegion* hr) {
  PerRegionTable* fl = _free_list;
  while (fl != NULL) {
    PerRegionTable* nxt = fl->next();
    PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, nxt);
    if (res == fl) {
      fl->init(hr, true);
      return fl;
    } else {
      fl = _free_list;
    }
  }
  assert(fl == NULL, "Loop condition.");
  return new PerRegionTable(hr);
}

PerRegionTable* volatile PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _m(m),
  _num_occupied(0),
  _coarse_map(G1CollectedHeap::heap()->max_regions(), mtGC),
  _n_coarse_entries(0),
  _fine_grain_regions(NULL),
  _n_fine_entries(0),
  _first_all_fine_prts(NULL),
  _last_all_fine_prts(NULL),
  _fine_eviction_start(0),
  _sparse_table()
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY(PerRegionTablePtr, _max_fine_entries, mtGC);
  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}
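
// All fine-grain PRTs currently in use are additionally chained into a
// doubly-linked list via _first_all_fine_prts/_last_all_fine_prts. This list
// lets clear() release every PRT with a single bulk_free() call and lets
// mem_size() query one representative PRT on behalf of all of them.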

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

CardIdx_t OtherRegionsTable::card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr) {
  assert(hr->is_in_reserved(within_region),
         "HeapWord " PTR_FORMAT " is outside of region %u [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(within_region), hr->hrm_index(), p2i(hr->bottom()), p2i(hr->end()));
  CardIdx_t result = (CardIdx_t)(pointer_delta((HeapWord*)within_region, hr->bottom()) >> (CardTable::card_shift - LogHeapWordSize));
  return result;
}

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  size_t num_added_by_coarsening = 0;
  // Otherwise find a per-region table to add it to.
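  // The fine-grain table is a fixed-size hash table indexed by the source
  // region's index masked with _mod_max_fine_entries_mask; regions that hash
  // to the same slot are chained through their collision_list_next pointers.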
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLocker x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      CardIdx_t card_index = card_within_region(from, from_hr);

      SparsePRT::AddCardResult result = _sparse_table.add_card(from_hrm_ind, card_index);
      if (result != SparsePRT::overflow) {
        if (result == SparsePRT::added) {
          Atomic::inc(&_num_occupied, memory_order_relaxed);
        }
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table(num_added_by_coarsening);
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      Atomic::release_store(&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      // Transfer from sparse to fine-grain. The cards from the sparse table
      // were already added to the total in _num_occupied.
      SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
      assert(sprt_entry != NULL, "There should have been an entry");
      for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
        CardIdx_t c = sprt_entry->card(i);
        prt->add_card(c);
      }
      // Now we can delete the sparse entry.
      bool res = _sparse_table.delete_entry(from_hrm_ind);
      assert(res, "It should have been there.");
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  if (prt->add_reference(from)) {
    num_added_by_coarsening++;
  }
  Atomic::add(&_num_occupied, num_added_by_coarsening, memory_order_relaxed);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT (%d)", p2i(from), prt->contains_reference(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}
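
// When the fine-grain table is full, delete_region_table() samples
// _fine_eviction_sample_size collision chains, evicts the most occupied PRT
// it finds, and marks that PRT's region in _coarse_map so that future
// references from it are summarized by a single bit.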

jint OtherRegionsTable::_n_coarsenings = 0;

PerRegionTable* OtherRegionsTable::delete_region_table(size_t& added_by_deleted) {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  added_by_deleted = HeapRegion::CardsPerRegion - max_occ;
  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  return occupied() <= limit;
}

bool OtherRegionsTable::is_empty() const {
  return occupied() == 0;
}

size_t OtherRegionsTable::occupied() const {
  return _num_occupied;
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}
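
// Memory shared by all remembered sets rather than owned by an individual
// one is reported separately: the global from-card cache via
// static_mem_size() and the PerRegionTable free list via fl_mem_size().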

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  if (_n_coarse_entries > 0) {
    _coarse_map.clear();
  }
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  _num_occupied = 0;
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLocker x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    CardIdx_t card_index = card_within_region(from, hr);
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _code_roots(),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Mutex::_safepoint_check_never),
    _other_regions(&_m),
    _hr(hr),
    _state(Untracked)
{
}

void HeapRegionRemSet::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void HeapRegionRemSet::setup_remset_size() {
  const int LOG_M = 20;
  guarantee(HeapRegion::LogOfHRGrainBytes >= LOG_M, "Code assumes the region size >= 1M, but is " SIZE_FORMAT "B", HeapRegion::GrainBytes);

  int region_size_log_mb = HeapRegion::LogOfHRGrainBytes - LOG_M;
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * ((size_t)1 << (region_size_log_mb + 1));
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

void HeapRegionRemSet::clear(bool only_cardset) {
  MutexLocker x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked(only_cardset);
}

void HeapRegionRemSet::clear_locked(bool only_cardset) {
  if (!only_cardset) {
    _code_roots.clear();
  }
  clear_fcc();
  _other_regions.clear();
  set_state_empty();
  assert(occupied() == 0, "Should be clear.");
}

// Code roots support
//
// The code root set is protected by two separate locking schemes
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         "should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
         BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLocker ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}