/*
 * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdcq(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(NULL),
    _surviving_young_words(NULL),
    _surviving_words_length(young_cset_length + 1),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young gen region in the collection set, plus one
  // extra: entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few padding elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
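  // Both entries map to old: aged-out young objects are promoted there, and
  // old objects have no further space to move on to.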
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old] = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

  initialize_numa_stats();
}

size_t G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _rdcq.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != NULL, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_task()) {
    verify_task(task.to_partial_array_task());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

void G1ParScanThreadState::trim_queue() {
  do {
    // Fully drain the queue.
    trim_queue_to_threshold(0);
  } while (!_task_queue->is_empty());
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
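    // Setting the tenuring threshold to zero accomplishes that: no age can be
    // below it, so every subsequent young object is routed straight to old gen.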
    // Only consider failed PLAB refills here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // No other space to try.
    return NULL;
  }
}

G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  return dest(region_attr);
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                 oop const old,
                                                 markWord const old_mark) {
  const size_t word_sz = old->size();

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_attr.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
    if (obj_ptr == NULL) {
      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
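        // Either way, the oop it returns is the address that references to
        // "old" must be updated to point at.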
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    update_numa_stats(node_index);

    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

    const uint young_index = from_region->young_index_in_cset();

    assert((from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant");

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
      }
      if (old_mark.has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark_raw(old_mark);
        markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
        old_mark.set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark.set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = region_attr.is_young();
      const bool is_to_young = dest_attr.is_young();
      assert(is_from_young == from_region->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
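      // Chunking bounds the size of a single queue task and lets other
      // workers steal and scan parts of the same large array in parallel.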
      arrayOop(obj)->set_length(0);
      do_partial_array(PartialArrayScanTask(old));
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];

    if (pss == NULL) {
      continue;
    }

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two values before the call to
    // G1ParScanThreadState::flush() because it resets the PLAB allocator
    // where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush(_surviving_young_words_total) * HeapWordSize;

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);

    delete pss;
    _states[worker_id] = NULL;
  }
  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 G1RedirtyCardsQueueSet* rdcqs,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _rdcqs(rdcqs),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}