/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsCardTable.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"

CMSCardTable::CMSCardTable(MemRegion whole_heap) :
    CardTableRS(whole_heap, CMSPrecleaningEnabled /* scanned_concurrently */) {
}

// Returns the number of chunks necessary to cover "mr".
size_t CMSCardTable::chunks_to_cover(MemRegion mr) {
  return (size_t)(addr_to_chunk_index(mr.last()) -
                  addr_to_chunk_index(mr.start()) + 1);
}

// Returns the index of the chunk in a stride which
// covers the given address.
uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) {
  uintptr_t card = (uintptr_t) byte_for(addr);
  return card / ParGCCardsPerStrideChunk;
}
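
// Illustrative geometry for the chunk arithmetic above (a sketch only,
// assuming the usual 512-byte cards and the default
// ParGCCardsPerStrideChunk of 256): one stride chunk then covers
// 256 cards, i.e. 256 * 512 bytes = 128 KB of heap, and two addresses
// share a chunk index exactly when their card addresses fall into the
// same aligned 256-card block of the card table.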

void CMSCardTable::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                     OopsInGenClosure* cl,
                                     CardTableRS* ct,
                                     uint n_threads) {
  assert(n_threads > 0, "expected n_threads > 0");
  assert(n_threads <= ParallelGCThreads,
         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);

  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides,
                   cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    uintptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}

void
CMSCardTable::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We could go from higher to lower addresses here, but it wouldn't
  // help much because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // This function is used by the parallel card table iteration.
    const bool parallel = true;

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary(),
                                                     parallel);
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}
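
// A worked example of the interleaving implemented above (the numbers
// are illustrative, not requirements): with n_threads = 4 and the
// default ParGCStridesPerThread = 2, there are n_strides = 8 strides.
// The stride with index 3 then processes exactly the chunks whose
// chunk index is congruent to 3 mod 8, advancing chunk_card_start by
// 8 * ParGCCardsPerStrideChunk cards on each iteration of the loop.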

void
CMSCardTable::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t    lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.

  uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
  uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;

  // First, set "our" lowest_non_clean entry, which would be
  // used by the thread scanning an adjoining left chunk with
  // a non-array object straddling the mutual boundary.
  // Find the object that spans our boundary, if one exists.
  // first_block is the block possibly straddling our left boundary.
  HeapWord* first_block = sp->block_start(chunk_mr.start());
  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
         "First chunk should always have a co-initial block");
  // Does the block straddle the chunk's left boundary, and is it
  // a non-array object?
  if (first_block < chunk_mr.start()        // first block straddles left bdry
      && sp->block_is_obj(first_block)      // first block is an object
      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
           || oop(first_block)->is_typeArray())) {
    // Find our least non-clean card, so that a left neighbor
    // does not scan an object straddling the mutual boundary
    // too far to the right, and attempt to scan a portion of
    // that object twice.
    jbyte* first_dirty_card = NULL;
    jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
    jbyte* last_card_to_check =
      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                    (intptr_t) last_card_of_first_obj);
    // Note that this does not need to go beyond our last card
    // if our first object completely straddles this chunk.
    for (jbyte* cur = first_card_of_cur_chunk;
         cur <= last_card_to_check; cur++) {
      jbyte val = *cur;
      if (card_will_be_scanned(val)) {
        first_dirty_card = cur; break;
      } else {
        assert(!card_may_have_been_dirty(val), "Error");
      }
    }
    if (first_dirty_card != NULL) {
      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
      assert(lowest_non_clean[cur_chunk_index] == NULL,
             "Write exactly once : value should be stable hereafter for this round");
      lowest_non_clean[cur_chunk_index] = first_dirty_card;
    }
  } else {
    // In this case we can help our neighbor by just asking them
    // to stop at our first card (even though it may not be dirty).
    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
  }
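
  // Sketch of the left-boundary case handled above:
  //
  //        left neighbor's chunk   |   our chunk (chunk_mr)
  //   ... [ head of non-array obj ==|== tail of obj ] ...
  //                                 ^
  //                          chunk_mr.start()
  //
  // The object's head lies in the left neighbor's chunk. The LNC entry
  // published above tells that neighbor where to stop scanning, so the
  // straddling object is processed exactly once.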

  // Next, set our own max_to_do, which will strictly/exclusively bound
  // the highest address that we will scan past the right end of our chunk.
  HeapWord* max_to_do = NULL;
  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.
    // What is our last block? We check the first block of
    // the next (right) chunk rather than strictly check our last block
    // because it's potentially more efficient to do so.
    HeapWord* const last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if ((last_block == chunk_mr.end())     // our last block does not straddle boundary
        || !sp->block_is_obj(last_block)   // last_block isn't an object
        || oop(last_block)->is_objArray()  // last_block is an array (precisely marked)
        || oop(last_block)->is_typeArray()) {
      max_to_do = chunk_mr.end();
    } else {
      assert(last_block < chunk_mr.end(), "Tautology");
      // It is a non-array object that straddles the right boundary of this chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk. Note that the last object may not start in
      // the chunk.
      jbyte* const last_obj_card = byte_for(last_block);
      const jbyte val = *last_obj_card;
      if (!card_will_be_scanned(val)) {
        assert(!card_may_have_been_dirty(val), "Error");
        // The card containing the head is not dirty. Any marks on
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end of our chunk.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk. Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        const size_t last_block_size = sp->block_size(last_block);
        jbyte* const last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned, terminating
        // at the end of the last_block, if no earlier dirty card
        // is found.
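        // (For instance, assuming the usual 512-byte cards and the
        // default ParGCCardsPerStrideChunk of 256, a 1 MB straddling
        // object covers 2048 cards, i.e. eight full chunks, all of
        // which may need to be examined by this loop.)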
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
               "last card of next chunk may be wrong");
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_of_last_obj; cur++) {
          const jbyte val = *cur;
          if (card_will_be_scanned(val)) {
            limit_card = cur; break;
          } else {
            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
          }
        }
        if (limit_card != NULL) {
          max_to_do = addr_for(limit_card);
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        } else {
          // The following is a pessimistic value, because it's possible
          // that a dirty card on a subsequent chunk has been cleared by
          // the time we get to look at it; we'll correct for that further below,
          // using the LNC array which records the least non-clean card
          // before cards were cleared in a particular chunk.
          limit_card = last_card_of_last_obj;
          max_to_do = last_block + last_block_size;
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        }
        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // It is possible that a dirty card for the last object may have been
        // cleared before we had a chance to examine it. In that case, the value
        // will have been logged in the LNC for that chunk.
        // We need to examine as many chunks to the right as this object
        // covers. However, we need to bound this checking to the largest
        // entry in the LNC array: this is because the heap may expand
        // after the LNC array has been created but before we reach this point,
        // and the last block in our chunk may have been expanded to include
        // the expansion delta (and possibly subsequently allocated from, so
        // it wouldn't be sufficient to check whether that last block was
        // or was not an object at this point).
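        // In short: we clamp the number of LNC entries consulted below
        // to the chunks covering the snapshotted "used" region, since
        // only those entries were initialized for this round.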
        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
          - lowest_non_clean_base_chunk_index;
        const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
          - lowest_non_clean_base_chunk_index;
        if (last_chunk_index_to_check > last_chunk_index) {
          assert(last_block + last_block_size > used.end(),
                 "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
                 " does not exceed used.end() = " PTR_FORMAT ","
                 " yet last_chunk_index_to_check " INTPTR_FORMAT
                 " exceeds last_chunk_index " INTPTR_FORMAT,
                 p2i(last_block), p2i(last_block + last_block_size),
                 p2i(used.end()),
                 last_chunk_index_to_check, last_chunk_index);
          assert(sp->used_region().end() > used.end(),
                 "Expansion did not happen: "
                 "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(sp->used_region().start()), p2i(sp->used_region().end()),
                 p2i(used.start()), p2i(used.end()));
          last_chunk_index_to_check = last_chunk_index;
        }
        for (uintptr_t lnc_index = cur_chunk_index + 1;
             lnc_index <= last_chunk_index_to_check;
             lnc_index++) {
          jbyte* lnc_card = lowest_non_clean[lnc_index];
          if (lnc_card != NULL) {
            // We can stop at the first non-NULL entry we find.
            if (lnc_card <= limit_card) {
              limit_card = lnc_card;
              max_to_do = addr_for(limit_card);
              assert(limit_card != NULL && max_to_do != NULL, "Error");
            }
            // In any case, we break now.
            break;
          } // else continue to look for a non-NULL entry if any
        }
        assert(limit_card != NULL && max_to_do != NULL, "Error");
      }
      assert(max_to_do != NULL, "OOPS 1 !");
    }
    assert(max_to_do != NULL, "OOPS 2!");
  } else {
    max_to_do = used.end();
  }
  assert(max_to_do != NULL, "OOPS 3!");
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif
}
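
// The sizing logic below is a double-checked pattern; in sketch form
// (with "resize_stamp" standing for _last_LNC_resizing_collection[i]):
//
//   if (load_acquire(resize_stamp) != cur_collection) {   // dirty read
//     take ParGCRareEvent_lock;
//     if (resize_stamp != cur_collection) {               // re-check
//       (re)allocate and initialize the LNC array;
//       release_store(resize_stamp, cur_collection);      // publish
//     }
//   }
//
// The release/acquire pair makes the initialized array contents visible
// before the stamp update on weakly-ordered architectures.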

void
CMSCardTable::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region. Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then a subsequent "expand_and_allocate" would increase
  // the number of chunks in the covered region. A second thread would
  // then come along, see that the size didn't match, and free and
  // allocate again, leaving the first thread using a freed
  // "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = CMSHeap::heap()->total_collections();
  // The updated _last_LNC_resizing_collection[i] must not become visible
  // before _lowest_non_clean and friends are visible. Therefore we use
  // acquire/release to guarantee this on non-TSO architectures.
  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    // This load_acquire is here for clarity only. The MutexLocker already fences.
    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
          _lowest_non_clean_chunk_size[i]       = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++) {
            _lowest_non_clean[i][j] = NULL;
          }
        }
      }
      // Make sure this gets visible only after _lowest_non_clean* was initialized.
      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean                  = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size      = _lowest_non_clean_chunk_size[i];
}

#ifdef ASSERT
void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const {
  MemRegion ur    = sp->used_region();
  MemRegion urasm = sp->used_region_at_save_marks();

  if (!ur.contains(urasm)) {
    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
    MemRegion ur2    = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
}
#endif // ASSERT