/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1CollectedHeap.inline.hpp" 27 #include "gc/g1/heapRegionRemSet.hpp" 28 #include "gc/g1/heapRegionSet.inline.hpp" 29 30 uint FreeRegionList::_unrealistically_long_length = 0; 31 32 #ifndef PRODUCT 33 void HeapRegionSetBase::verify_region(HeapRegion* hr) { 34 assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index()); 35 assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions 36 assert(hr->is_humongous() == regions_humongous(), "Wrong humongous state for region %u and set %s", hr->hrm_index(), name()); 37 assert(hr->is_free() == regions_free(), "Wrong free state for region %u and set %s", hr->hrm_index(), name()); 38 assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name()); 39 assert(!hr->is_empty() || hr->is_free() || hr->is_archive(), 40 "Empty region %u is not free or archive for set %s", hr->hrm_index(), name()); 41 assert(hr->rem_set()->verify_ready_for_par_iteration(), "Wrong iteration state %u", hr->hrm_index()); 42 } 43 #endif 44 45 void HeapRegionSetBase::verify() { 46 // It's important that we also observe the MT safety protocol even 47 // for the verification calls. If we do verification without the 48 // appropriate locks and the set changes underneath our feet 49 // verification might fail and send us on a wild goose chase. 50 check_mt_safety(); 51 52 guarantee_heap_region_set(( is_empty() && length() == 0 && total_capacity_bytes() == 0) || 53 (!is_empty() && length() > 0 && total_capacity_bytes() > 0) , 54 "invariant"); 55 } 56 57 void HeapRegionSetBase::verify_start() { 58 // See comment in verify() about MT safety and verification. 59 check_mt_safety(); 60 assert_heap_region_set(!_verify_in_progress, "verification should not be in progress"); 61 62 // Do the basic verification first before we do the checks over the regions. 
63 HeapRegionSetBase::verify(); 64 65 _verify_in_progress = true; 66 } 67 68 void HeapRegionSetBase::verify_end() { 69 // See comment in verify() about MT safety and verification. 70 check_mt_safety(); 71 assert_heap_region_set(_verify_in_progress, "verification should be in progress"); 72 73 _verify_in_progress = false; 74 } 75 76 void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) { 77 out->cr(); 78 out->print_cr("Set: %s (" PTR_FORMAT ")", name(), p2i(this)); 79 out->print_cr(" Region Assumptions"); 80 out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous())); 81 out->print_cr(" free : %s", BOOL_TO_STR(regions_free())); 82 out->print_cr(" Attributes"); 83 out->print_cr(" length : %14u", length()); 84 out->print_cr(" total capacity : " SIZE_FORMAT_W(14) " bytes", 85 total_capacity_bytes()); 86 } 87 88 HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker) 89 : _name(name), _verify_in_progress(false), 90 _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker), 91 _count() 92 { } 93 94 void FreeRegionList::set_unrealistically_long_length(uint len) { 95 guarantee(_unrealistically_long_length == 0, "should only be set once"); 96 _unrealistically_long_length = len; 97 } 98 99 void FreeRegionList::remove_all() { 100 check_mt_safety(); 101 verify_optional(); 102 103 HeapRegion* curr = _head; 104 while (curr != NULL) { 105 verify_region(curr); 106 107 HeapRegion* next = curr->next(); 108 curr->set_next(NULL); 109 curr->set_prev(NULL); 110 curr->set_containing_set(NULL); 111 curr = next; 112 } 113 clear(); 114 115 verify_optional(); 116 } 117 118 void FreeRegionList::add_ordered(FreeRegionList* from_list) { 119 check_mt_safety(); 120 from_list->check_mt_safety(); 121 122 verify_optional(); 123 from_list->verify_optional(); 124 125 if (from_list->is_empty()) { 126 return; 127 } 128 129 #ifdef ASSERT 130 FreeRegionListIterator iter(from_list); 131 while 
(iter.more_available()) { 132 HeapRegion* hr = iter.get_next(); 133 // In set_containing_set() we check that we either set the value 134 // from NULL to non-NULL or vice versa to catch bugs. So, we have 135 // to NULL it first before setting it to the value. 136 hr->set_containing_set(NULL); 137 hr->set_containing_set(this); 138 } 139 #endif // ASSERT 140 141 if (is_empty()) { 142 assert_free_region_list(length() == 0 && _tail == NULL, "invariant"); 143 _head = from_list->_head; 144 _tail = from_list->_tail; 145 } else { 146 HeapRegion* curr_to = _head; 147 HeapRegion* curr_from = from_list->_head; 148 149 while (curr_from != NULL) { 150 while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) { 151 curr_to = curr_to->next(); 152 } 153 154 if (curr_to == NULL) { 155 // The rest of the from list should be added as tail 156 _tail->set_next(curr_from); 157 curr_from->set_prev(_tail); 158 curr_from = NULL; 159 } else { 160 HeapRegion* next_from = curr_from->next(); 161 162 curr_from->set_next(curr_to); 163 curr_from->set_prev(curr_to->prev()); 164 if (curr_to->prev() == NULL) { 165 _head = curr_from; 166 } else { 167 curr_to->prev()->set_next(curr_from); 168 } 169 curr_to->set_prev(curr_from); 170 171 curr_from = next_from; 172 } 173 } 174 175 if (_tail->hrm_index() < from_list->_tail->hrm_index()) { 176 _tail = from_list->_tail; 177 } 178 } 179 180 _count.increment(from_list->length(), from_list->total_capacity_bytes()); 181 from_list->clear(); 182 183 verify_optional(); 184 from_list->verify_optional(); 185 } 186 187 void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) { 188 check_mt_safety(); 189 assert_free_region_list(num_regions >= 1, "pre-condition"); 190 assert_free_region_list(!is_empty(), "pre-condition"); 191 192 verify_optional(); 193 DEBUG_ONLY(uint old_length = length();) 194 195 HeapRegion* curr = first; 196 uint count = 0; 197 while (count < num_regions) { 198 verify_region(curr); 199 HeapRegion* next = 
curr->next(); 200 HeapRegion* prev = curr->prev(); 201 202 assert(count < num_regions, 203 "[%s] should not come across more regions " 204 "pending for removal than num_regions: %u", 205 name(), num_regions); 206 207 if (prev == NULL) { 208 assert_free_region_list(_head == curr, "invariant"); 209 _head = next; 210 } else { 211 assert_free_region_list(_head != curr, "invariant"); 212 prev->set_next(next); 213 } 214 if (next == NULL) { 215 assert_free_region_list(_tail == curr, "invariant"); 216 _tail = prev; 217 } else { 218 assert_free_region_list(_tail != curr, "invariant"); 219 next->set_prev(prev); 220 } 221 if (_last == curr) { 222 _last = NULL; 223 } 224 225 curr->set_next(NULL); 226 curr->set_prev(NULL); 227 remove(curr); 228 229 count++; 230 curr = next; 231 } 232 233 assert(count == num_regions, 234 "[%s] count: %u should be == num_regions: %u", 235 name(), count, num_regions); 236 assert(length() + num_regions == old_length, 237 "[%s] new length should be consistent " 238 "new length: %u old length: %u num_regions: %u", 239 name(), length(), old_length, num_regions); 240 241 verify_optional(); 242 } 243 244 void FreeRegionList::verify() { 245 // See comment in HeapRegionSetBase::verify() about MT safety and 246 // verification. 247 check_mt_safety(); 248 249 // This will also do the basic verification too. 
250 verify_start(); 251 252 verify_list(); 253 254 verify_end(); 255 } 256 257 void FreeRegionList::clear() { 258 _count = HeapRegionSetCount(); 259 _head = NULL; 260 _tail = NULL; 261 _last = NULL; 262 } 263 264 void FreeRegionList::verify_list() { 265 HeapRegion* curr = _head; 266 HeapRegion* prev1 = NULL; 267 HeapRegion* prev0 = NULL; 268 uint count = 0; 269 size_t capacity = 0; 270 uint last_index = 0; 271 272 guarantee(_head == NULL || _head->prev() == NULL, "_head should not have a prev"); 273 while (curr != NULL) { 274 verify_region(curr); 275 276 count++; 277 guarantee(count < _unrealistically_long_length, 278 "[%s] the calculated length: %u seems very long, is there maybe a cycle? curr: " PTR_FORMAT " prev0: " PTR_FORMAT " " "prev1: " PTR_FORMAT " length: %u", 279 name(), count, p2i(curr), p2i(prev0), p2i(prev1), length()); 280 281 if (curr->next() != NULL) { 282 guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up"); 283 } 284 guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted"); 285 last_index = curr->hrm_index(); 286 287 capacity += curr->capacity(); 288 289 prev1 = prev0; 290 prev0 = curr; 291 curr = curr->next(); 292 } 293 294 guarantee(_tail == prev0, "Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index()); 295 guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next"); 296 guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count); 297 guarantee(total_capacity_bytes() == capacity, "%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 298 name(), total_capacity_bytes(), capacity); 299 } 300 301 // Note on the check_mt_safety() methods below: 302 // 303 // Verification of the "master" heap region sets / lists that are 304 // maintained by G1CollectedHeap is always done during a STW pause and 305 // by the VM thread at the start / end of the pause. 
The standard 306 // verification methods all assert check_mt_safety(). This is 307 // important as it ensures that verification is done without 308 // concurrent updates taking place at the same time. It follows, that, 309 // for the "master" heap region sets / lists, the check_mt_safety() 310 // method should include the VM thread / STW case. 311 312 void MasterFreeRegionListMtSafeChecker::check() { 313 // Master Free List MT safety protocol: 314 // (a) If we're at a safepoint, operations on the master free list 315 // should be invoked by either the VM thread (which will serialize 316 // them) or by the GC workers while holding the 317 // FreeList_lock. 318 // (b) If we're not at a safepoint, operations on the master free 319 // list should be invoked while holding the Heap_lock. 320 321 if (SafepointSynchronize::is_at_safepoint()) { 322 guarantee(Thread::current()->is_VM_thread() || 323 FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint"); 324 } else { 325 guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint"); 326 } 327 } 328 329 void SecondaryFreeRegionListMtSafeChecker::check() { 330 // Secondary Free List MT safety protocol: 331 // Operations on the secondary free list should always be invoked 332 // while holding the SecondaryFreeList_lock. 
333 334 guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol"); 335 } 336 337 void OldRegionSetMtSafeChecker::check() { 338 // Master Old Set MT safety protocol: 339 // (a) If we're at a safepoint, operations on the master old set 340 // should be invoked: 341 // - by the VM thread (which will serialize them), or 342 // - by the GC workers while holding the FreeList_lock, if we're 343 // at a safepoint for an evacuation pause (this lock is taken 344 // anyway when an GC alloc region is retired so that a new one 345 // is allocated from the free list), or 346 // - by the GC workers while holding the OldSets_lock, if we're at a 347 // safepoint for a cleanup pause. 348 // (b) If we're not at a safepoint, operations on the master old set 349 // should be invoked while holding the Heap_lock. 350 351 if (SafepointSynchronize::is_at_safepoint()) { 352 guarantee(Thread::current()->is_VM_thread() 353 || FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(), 354 "master old set MT safety protocol at a safepoint"); 355 } else { 356 guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint"); 357 } 358 } 359 360 void HumongousRegionSetMtSafeChecker::check() { 361 // Humongous Set MT safety protocol: 362 // (a) If we're at a safepoint, operations on the master humongous 363 // set should be invoked by either the VM thread (which will 364 // serialize them) or by the GC workers while holding the 365 // OldSets_lock. 366 // (b) If we're not at a safepoint, operations on the master 367 // humongous set should be invoked while holding the Heap_lock. 
368 369 if (SafepointSynchronize::is_at_safepoint()) { 370 guarantee(Thread::current()->is_VM_thread() || 371 OldSets_lock->owned_by_self(), 372 "master humongous set MT safety protocol at a safepoint"); 373 } else { 374 guarantee(Heap_lock->owned_by_self(), 375 "master humongous set MT safety protocol outside a safepoint"); 376 } 377 } 378 379 void FreeRegionList_test() { 380 FreeRegionList l("test"); 381 382 const uint num_regions_in_test = 5; 383 // Create a fake heap. It does not need to be valid, as the HeapRegion constructor 384 // does not access it. 385 MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords); 386 // Allocate a fake BOT because the HeapRegion constructor initializes 387 // the BOT. 388 size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size()); 389 HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC); 390 ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size())); 391 G1RegionToSpaceMapper* bot_storage = 392 G1RegionToSpaceMapper::create_mapper(bot_rs, 393 bot_rs.size(), 394 os::vm_page_size(), 395 HeapRegion::GrainBytes, 396 G1BlockOffsetSharedArray::N_bytes, 397 mtGC); 398 G1BlockOffsetSharedArray oa(heap, bot_storage); 399 bot_storage->commit_regions(0, num_regions_in_test); 400 401 // Set up memory regions for the heap regions. 
402 MemRegion mr0(heap.start(), HeapRegion::GrainWords); 403 MemRegion mr1(mr0.end(), HeapRegion::GrainWords); 404 MemRegion mr2(mr1.end(), HeapRegion::GrainWords); 405 MemRegion mr3(mr2.end(), HeapRegion::GrainWords); 406 MemRegion mr4(mr3.end(), HeapRegion::GrainWords); 407 408 HeapRegion hr0(0, &oa, mr0); 409 HeapRegion hr1(1, &oa, mr1); 410 HeapRegion hr2(2, &oa, mr2); 411 HeapRegion hr3(3, &oa, mr3); 412 HeapRegion hr4(4, &oa, mr4); 413 l.add_ordered(&hr1); 414 l.add_ordered(&hr0); 415 l.add_ordered(&hr3); 416 l.add_ordered(&hr4); 417 l.add_ordered(&hr2); 418 assert(l.length() == num_regions_in_test, "wrong length"); 419 l.verify_list(); 420 421 bot_storage->uncommit_regions(0, num_regions_in_test); 422 delete bot_storage; 423 FREE_C_HEAP_ARRAY(HeapWord, bot_data); 424 }