/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _g1_rem(g1h->g1_rem_set()),
    _hash_seed(17), _worker_id(worker_id),
    _term_attempts(0),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _age_table(false), _scanner(g1h, rp),
    _strong_roots_time(0), _term_time(0),
    _old_gen_is_full(false)
{
  _scanner.set_par_scan_thread_state(this);
  // We allocate one entry per young collection set region plus one, since
  // we "sacrifice" entry 0 to keep track of surviving bytes for non-young
  // regions (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  uint array_length = PADDING_ELEM_NUM +
                      real_length +
                      PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

  _plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[InCSetState::Young] = InCSetState::Old;
  _dest[InCSetState::Old]   = InCSetState::Old;

  _start = os::elapsedTime();
}

G1ParScanThreadState::~G1ParScanThreadState() {
  _plab_allocator->retire_alloc_buffers();
  delete _plab_allocator;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}

void G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
}

void G1ParScanThreadState::print_termination_stats(outputStream* const st) const {
  const double elapsed_ms = elapsed_time() * 1000.0;
  const double s_roots_ms = strong_roots_time() * 1000.0;
  const double term_ms    = term_time() * 1000.0;
  size_t alloc_buffer_waste = 0;
  size_t undo_waste = 0;
  _plab_allocator->waste(alloc_buffer_waste, undo_waste);
  st->print_cr("%3u %9.2f %9.2f %6.2f "
               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
               _worker_id, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
               alloc_buffer_waste * HeapWordSize / K,
               undo_waste * HeapWordSize / K);
}

#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->obj_in_cs(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  } else {
    oop p = oopDesc::load_decode_heap_oop(ref);
    assert(_g1h->is_in_g1_reserved(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
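    // (Only the fixed-size local queue is visible to other threads'
    // work stealing; the overflow stack is private to this thread, so
    // working on overflow entries first keeps stealable work available.)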
    while (_refs->pop_overflow(ref)) {
      dispatch_reference(ref);
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      AllocationContext_t const context,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
  assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        context,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
    // No other space to try.
    return NULL;
  }
}

InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 so that the -1 index used for non-young regions maps to the
  // reserved entry 0 of the surviving-young-words array.
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
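  // Slow path: retry in the same destination with a direct or refilled PLAB
  // allocation, then in the next destination (survivor -> old), and if all
  // of that fails treat this object as an evacuation failure.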
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      age_table()->add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    size_t* const surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return forward_ptr;
  }
}

oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->obj_in_cs(old),
         err_msg("Object " PTR_FORMAT " should be in the CSet", p2i(old)));

  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
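    // As the owner we mark the region as having failed evacuation, preserve
    // the original mark word so it can be restored when self-forwarding
    // pointers are removed, and scan the object's fields in place, since
    // the object will not move.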
We are the "owner" of the object. 323 HeapRegion* r = _g1h->heap_region_containing(old); 324 325 if (!r->evacuation_failed()) { 326 r->set_evacuation_failed(true); 327 _g1h->hr_printer()->evac_failure(r); 328 } 329 330 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m); 331 332 _scanner.set_region(r); 333 old->oop_iterate_backwards(&_scanner); 334 335 return old; 336 } else { 337 // Forward-to-self failed. Either someone else managed to allocate 338 // space for this object (old != forward_ptr) or they beat us in 339 // self-forwarding it (old == forward_ptr). 340 assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr), 341 err_msg("Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " " 342 "should not be in the CSet", 343 p2i(old), p2i(forward_ptr))); 344 return forward_ptr; 345 } 346 } 347