/*
 * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdcq(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(NULL),
    _surviving_young_words(NULL),
    _surviving_words_length(young_cset_length + 1),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young gen region in the collection set, plus
  // one extra, since entry 0 keeps track of surviving bytes for non-young
  // regions. We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
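  // Concretely: survivor objects that have reached the tenuring threshold
  // are next copied into old, and old objects (present in the collection
  // set during mixed collections) stay in old.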
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old]   = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

  initialize_numa_stats();
}

size_t G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _rdcq.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != NULL, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_task()) {
    verify_task(task.to_partial_array_task());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should not be NULL here, as such references are never
  // pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card, and so the same card may be
  // processed multiple times. This means we might get references into old gen
  // here, so we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point into a humongous
  // region, as humongous regions are never added to the collection set.
  assert(!region_attr.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark_raw();
  if (m.is_marked()) {
    obj = (oop) m.decode_pointer();
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  assert(obj != NULL, "Must be");
  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }
  HeapRegion* from = _g1h->heap_region_containing(p);
  if (!from->is_young()) {
    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
  }
}

void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
  oop from_obj = task.to_source_array();

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         "invariant, next index: %d, length: %d", next_index, length);

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }

  HeapRegion* hr = _g1h->heap_region_containing(to_obj);
  G1ScanInYoungSetter x(&_scanner, hr->is_young());
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
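  // Worked example (illustrative; assumes the default ParGCArrayScanChunk
  // of 50): for a 1000-element array, the first call re-pushes the task and
  // scans [0,50); subsequent pops scan [50,100), [100,150), and so on, until
  // the remainder is at most 100 elements, which the final call scans in one
  // step after restoring the real length.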
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_task());
  }
}

// Process tasks until the overflow queue is empty and the local queue
// contains no more than threshold entries. NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing the stolen task may have added tasks to our queue.
    trim_queue();
  }
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refills here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained
      // above for making the survivor gen unavailable for allocation applies
      // to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // There is no other space to try.
    return NULL;
  }
}

G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ?
            m.age()
          : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  return dest(region_attr);
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  const size_t word_sz = old->size();

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_attr.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
    if (obj_ptr == NULL) {
      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    update_numa_stats(node_index);

    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
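  // (PLAB allocation is bump-pointer, so consecutive copies target
  // consecutive addresses; prefetching PrefetchCopyIntervalInBytes ahead
  // warms the destination cache lines for the copy below.)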
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

    const uint young_index = from_region->young_index_in_cset();

    assert((from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant");

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
      }
      if (old_mark.has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark_raw(old_mark);
        markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
        old_mark.set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark.set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = region_attr.is_young();
      const bool is_to_young = dest_attr.is_young();
      assert(is_from_young == from_region->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      do_partial_array(PartialArrayScanTask(old));
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
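// The split keeps the copy path inlinable for hot internal callers such as
// do_oop_evac(), while external callers go through this single out-of-line
// symbol.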
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];

    if (pss == NULL) {
      continue;
    }

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush(_surviving_young_words_total) * HeapWordSize;

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);

    delete pss;
    _states[worker_id] = NULL;
  }
  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
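    // Either way, the thread that won the forwarding CAS is responsible for
    // scanning the object; we only need to return the forwardee it installed.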
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != NULL) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != NULL) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 G1RedirtyCardsQueueSet* rdcqs,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _rdcqs(rdcqs),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}
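
// Typical lifecycle, as an illustrative sketch only (the actual call sites
// live in G1's evacuation code in G1CollectedHeap; the local names below are
// hypothetical):
//
//   G1ParScanThreadStateSet per_thread_states(g1h, rdcqs, num_workers,
//                                             young_cset_length,
//                                             optional_cset_length);
//   // Each worker, during the pause:
//   G1ParScanThreadState* pss = per_thread_states.state_for_worker(worker_id);
//   ... pss->copy_to_survivor_space(...) ...   // evacuate live objects
//   pss->steal_and_trim_queue(task_queues);    // then help drain remaining work
//   // Once per pause, after evacuation:
//   per_thread_states.flush();                 // merge stats, delete the states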