/*
 * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdcq(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(NULL),
    _surviving_young_words(NULL),
    _surviving_words_length(young_cset_length + 1),
    _old_gen_is_full(false),
    _objarray_scan_chunk_size(ParGCArrayScanChunk),
    _objarray_length_offset_in_bytes(arrayOopDesc::length_offset_in_bytes()),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young gen region in the collection set plus
  // one extra entry, since entry 0 keeps track of surviving bytes for
  // non-young regions. We also add a few elements at the beginning and at
  // the end in an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old]   = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

  initialize_numa_stats();
}
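
// Flush this thread's per-pause state into the shared statistics:
// redirtied cards, NUMA statistics, PLAB allocation statistics and the
// age table. Also accumulates this thread's surviving-words counts into
// the caller-provided array and returns the total number of words copied.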
size_t G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _rdcq.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != NULL, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_task()) {
    verify_task(task.to_partial_array_task());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT
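
// Evacuate the object referenced by p: copy it to its destination space
// (or find the copy somebody else already made) and update the reference
// to point to the new location.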
template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should not be NULL here, as NULL references are never
  // pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times, and so we might get references into old gen here.
  // Thus we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous region
  // as they are not added to the collection set due to the above precondition.
  assert(!region_attr.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark_raw();
  if (m.is_marked()) {
    obj = (oop) m.decode_pointer();
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  assert(obj != NULL, "Must be");
  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }
  HeapRegion* from = _g1h->heap_region_containing(p);
  if (!from->is_young()) {
    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
  }
}

void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
  oop from_obj = task.to_source_array();

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  assert(from_obj->is_forwarded(), "must be forwarded");

  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  assert(to_obj->is_objArray(), "must be obj array");
  objArrayOop to_array = objArrayOop(to_obj);

  // The next chunk index is in the length field of the to-space object.
  // Atomically increment by the chunk size to claim the associated chunk.
  char* to_addr = cast_from_oop<char*>(to_array);
  char* length_addr_raw = (to_addr + _objarray_length_offset_in_bytes);
  volatile int* length_addr = reinterpret_cast<int*>(length_addr_raw);
  int end = Atomic::add(length_addr, _objarray_scan_chunk_size, memory_order_relaxed);
#ifdef ASSERT
  // The from-space object contains the real length.
  int length = objArrayOop(from_obj)->length();
  assert(end <= length, "invariant: end %d, length %d", end, length);
  assert(((length - end) % _objarray_scan_chunk_size) == 0,
         "invariant: end %d, length %d, chunk size %d",
         end, length, _objarray_scan_chunk_size);
#endif // ASSERT

  HeapRegion* hr = _g1h->heap_region_containing(to_array);
  G1ScanInYoungSetter x(&_scanner, hr->is_young());
  // Process the claimed chunk. Note that the length field of
  // to_array is not correct. Fortunately, the iteration ignores
  // the length and just relies on start / end. However, it does
  // return the (incorrect) length, but we ignore it.
  to_array->oop_iterate_range(&_scanner, end - _objarray_scan_chunk_size, end);
}
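
// Begin chunked scanning of a just-copied objArray: enqueue partial scan
// tasks for all but the initial chunk, using the to-space length field as
// the claim counter, then scan the initial chunk ourselves.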
oop G1ParScanThreadState::start_partial_objArray(G1HeapRegionAttr dest_attr,
                                                 oop from_obj,
                                                 oop to_obj) {
  assert(from_obj->is_objArray(), "precondition");
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);

  int length = objArrayOop(from_obj)->length();
  int chunks = length / _objarray_scan_chunk_size;
  int end = length % _objarray_scan_chunk_size;
  assert(end <= length, "invariant");
  assert(((length - end) % _objarray_scan_chunk_size) == 0, "invariant");
  // The value of end can be 0, either because of a 0-length array or
  // because length is a multiple of the chunk size. Both of those
  // are rare and handled in the normal course of the iteration, so
  // not worth doing anything special about here.

  // Set to's length to the end of the initial chunk. Partial tasks use
  // that length field as the start of the next chunk to process. Must be
  // done before enqueuing partial scan tasks, in case other threads
  // steal any of those tasks.
  to_array->set_length(end);
  // Push partial scan tasks for all but the initial chunk. Pushed
  // before processing the initial chunk to allow other workers to
  // steal while we're processing.
  for (int i = 0; i < chunks; ++i) {
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  }
  G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
  // Process the initial chunk. No need to process the type in the
  // klass, as it will already be handled by processing the built-in
  // module. The length of to_array is not correct, but fortunately
  // the iteration ignores that length field and relies on start/end.
  to_array->oop_iterate_range(&_scanner, 0, end);
  return to_array;
}

void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_task());
  }
}

// Process tasks until the overflow queue is empty and the local queue
// contains no more than threshold entries. NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing the stolen task may have added tasks to our queue.
    trim_queue();
  }
}
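
// Slow-path fallback after a PLAB allocation in the intended destination
// failed. For a young (survivor) destination, retry in old gen and update
// *dest on success; for an old destination there is nowhere left to try,
// so return NULL.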
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider a failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making the survivor gen unavailable for allocation applies to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // No other space to try.
    return NULL;
  }
}

G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  return dest(region_attr);
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   oop old,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = NULL;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != NULL) {
    update_numa_stats(node_index);
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}
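
// Copy the given live object to its destination space, racing with other
// workers via the forwarding pointer in the mark word; the loser of the
// race undoes its allocation and returns the winner's copy.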
// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // Get the klass once. We'll need it again later, and this avoids
  // re-decoding when it's compressed.
  Klass* klass = old->klass();
  const size_t word_sz = old->size_given_klass(klass);

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
    if (obj_ptr == NULL) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
      }
      if (old_mark.has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark_raw(old_mark);
        markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
        old_mark.set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark.set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    // Most objects are not arrays, so do one array check rather than both
    // typeArray and objArray checks for each object.
    if (klass->is_array_klass()) {
      if (klass->is_typeArray_klass()) {
        // Nothing needs to be done for typeArrays. The body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        return obj;
      } else if (klass->is_objArray_klass()) {
        // Do special handling for objArray.
        return start_partial_objArray(dest_attr, old, obj);
      }
      // Not a special array, so fall through to generic handling.
    }
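
    // A just-copied String may be a candidate for deduplication; let the
    // string deduplication code decide and enqueue it if so.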
    if (G1StringDedup::is_enabled() && (klass == SystemDictionary::String_klass())) {
      const bool is_from_young = region_attr.is_young();
      const bool is_to_young = dest_attr.is_young();
      assert(is_from_young == from_region->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
    obj->oop_iterate_backwards(&_scanner);
    return obj;

  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];

    if (pss == NULL) {
      continue;
    }

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush(_surviving_young_words_total) * HeapWordSize;

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);

    delete pss;
    _states[worker_id] = NULL;
  }
  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}
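
// Called when copying old failed: try to install a self-forwarding
// pointer. The winner of that race keeps the object in place, records
// the failure for the containing region, preserves the mark word and
// scans the object's fields; losers return the forwardee installed by
// whoever won.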
NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}
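
// Publish this thread's per-node object allocation counts to the shared
// G1NUMAStats; called as part of flush() at the end of the pause.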
void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != NULL) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != NULL) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 G1RedirtyCardsQueueSet* rdcqs,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _rdcqs(rdcqs),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}