1 /* 2 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 27 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 28 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" 29 #include "oops/oop.inline.hpp" 30 #include "oops/oop.pcgc.inline.hpp" 31 #include "runtime/prefetch.inline.hpp" 32 33 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away 34 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 35 #endif // _MSC_VER 36 37 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp) 38 : _g1h(g1h), 39 _refs(g1h->task_queue(queue_num)), 40 _dcq(&g1h->dirty_card_queue_set()), 41 _ct_bs(g1h->g1_barrier_set()), 42 _g1_rem(g1h->g1_rem_set()), 43 _hash_seed(17), _queue_num(queue_num), 44 _term_attempts(0), 45 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 46 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 47 _age_table(false), _scanner(g1h, this, rp), 48 _strong_roots_time(0), _term_time(0), 49 _alloc_buffer_waste(0), _undo_waste(0) { 50 // we allocate G1YoungSurvRateNumRegions plus one entries, since 51 // we "sacrifice" entry 0 to keep track of surviving bytes for 52 // non-young regions (where the age is -1) 53 // We also add a few elements at the beginning and at the end in 54 // an attempt to eliminate cache contention 55 uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); 56 uint array_length = PADDING_ELEM_NUM + 57 real_length + 58 PADDING_ELEM_NUM; 59 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); 60 if (_surviving_young_words_base == NULL) 61 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, 62 "Not enough space for young surv histo."); 63 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 64 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); 65 66 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; 67 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; 68 69 _start = os::elapsedTime(); 70 } 71 72 void 73 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) 74 { 75 st->print_raw_cr("GC Termination Stats"); 76 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" 77 " ------waste (KiB)------"); 78 st->print_raw_cr("thr ms ms % ms % attempts" 79 " total alloc undo"); 80 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" 81 " ------- ------- -------"); 82 } 83 84 void 85 G1ParScanThreadState::print_termination_stats(int i, 86 outputStream* const st) const 87 { 88 const double elapsed_ms = elapsed_time() * 1000.0; 89 const double s_roots_ms = strong_roots_time() * 1000.0; 90 const double term_ms = term_time() * 1000.0; 91 st->print_cr("%3d %9.2f %9.2f %6.2f " 92 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " 93 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), 94 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, 95 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), 96 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, 97 alloc_buffer_waste() * HeapWordSize / K, 98 undo_waste() * HeapWordSize / K); 99 } 100 101 #ifdef ASSERT 102 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { 103 assert(ref != NULL, "invariant"); 104 assert(UseCompressedOops, "sanity"); 105 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref))); 106 oop p = oopDesc::load_decode_heap_oop(ref); 107 assert(_g1h->is_in_g1_reserved(p), 108 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); 109 return true; 110 } 111 112 bool G1ParScanThreadState::verify_ref(oop* ref) const { 113 assert(ref != NULL, "invariant"); 114 if (has_partial_array_mask(ref)) { 115 // Must be in the collection set--it's already been copied. 116 oop p = clear_partial_array_mask(ref); 117 assert(_g1h->obj_in_cs(p), 118 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); 119 } else { 120 oop p = oopDesc::load_decode_heap_oop(ref); 121 assert(_g1h->is_in_g1_reserved(p), 122 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); 123 } 124 return true; 125 } 126 127 bool G1ParScanThreadState::verify_task(StarTask ref) const { 128 if (ref.is_narrow()) { 129 return verify_ref((narrowOop*) ref); 130 } else { 131 return verify_ref((oop*) ref); 132 } 133 } 134 #endif // ASSERT 135 136 void G1ParScanThreadState::trim_queue() { 137 assert(_evac_failure_cl != NULL, "not set"); 138 139 StarTask ref; 140 do { 141 // Drain the overflow stack first, so other threads can steal. 142 while (_refs->pop_overflow(ref)) { 143 deal_with_reference(ref); 144 } 145 146 while (_refs->pop_local(ref)) { 147 deal_with_reference(ref); 148 } 149 } while (!_refs->is_empty()); 150 } 151 152 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) { 153 StarTask stolen_task; 154 while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) { 155 assert(verify_task(stolen_task), "sanity"); 156 deal_with_reference(stolen_task); 157 158 // We've just processed a reference and we might have made 159 // available new entries on the queues. So we have to make sure 160 // we drain the queues as necessary. 161 trim_queue(); 162 } 163 } 164 165 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) { 166 size_t word_sz = old->size(); 167 HeapRegion* from_region = _g1h->heap_region_containing_raw(old); 168 // +1 to make the -1 indexes valid... 169 int young_index = from_region->young_index_in_cset()+1; 170 assert( (from_region->is_young() && young_index > 0) || 171 (!from_region->is_young() && young_index == 0), "invariant" ); 172 G1CollectorPolicy* g1p = _g1h->g1_policy(); 173 markOop m = old->mark(); 174 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 175 : m->age(); 176 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 177 word_sz); 178 HeapWord* obj_ptr = allocate(alloc_purpose, word_sz); 179 #ifndef PRODUCT 180 // Should this evacuation fail? 181 if (_g1h->evacuation_should_fail()) { 182 if (obj_ptr != NULL) { 183 undo_allocation(alloc_purpose, obj_ptr, word_sz); 184 obj_ptr = NULL; 185 } 186 } 187 #endif // !PRODUCT 188 189 if (obj_ptr == NULL) { 190 // This will either forward-to-self, or detect that someone else has 191 // installed a forwarding pointer. 192 return _g1h->handle_evacuation_failure_par(this, old); 193 } 194 195 oop obj = oop(obj_ptr); 196 197 // We're going to allocate linearly, so might as well prefetch ahead. 198 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); 199 200 oop forward_ptr = old->forward_to_atomic(obj); 201 if (forward_ptr == NULL) { 202 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 203 204 // alloc_purpose is just a hint to allocate() above, recheck the type of region 205 // we actually allocated from and update alloc_purpose accordingly 206 HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr); 207 alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured; 208 209 if (g1p->track_object_age(alloc_purpose)) { 210 // We could simply do obj->incr_age(). However, this causes a 211 // performance issue. obj->incr_age() will first check whether 212 // the object has a displaced mark by checking its mark word; 213 // getting the mark word from the new location of the object 214 // stalls. So, given that we already have the mark word and we 215 // are about to install it anyway, it's better to increase the 216 // age on the mark word, when the object does not have a 217 // displaced mark word. We're not expecting many objects to have 218 // a displaced marked word, so that case is not optimized 219 // further (it could be...) and we simply call obj->incr_age(). 220 221 if (m->has_displaced_mark_helper()) { 222 // in this case, we have to install the mark word first, 223 // otherwise obj looks to be forwarded (the old mark word, 224 // which contains the forward pointer, was copied) 225 obj->set_mark(m); 226 obj->incr_age(); 227 } else { 228 m = m->incr_age(); 229 obj->set_mark(m); 230 } 231 age_table()->add(obj, word_sz); 232 } else { 233 obj->set_mark(m); 234 } 235 236 if (G1StringDedup::is_enabled()) { 237 G1StringDedup::enqueue_from_evacuation(from_region->is_young(), 238 to_region->is_young(), 239 queue_num(), 240 obj); 241 } 242 243 size_t* surv_young_words = surviving_young_words(); 244 surv_young_words[young_index] += word_sz; 245 246 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 247 // We keep track of the next start index in the length field of 248 // the to-space object. The actual length can be found in the 249 // length field of the from-space object. 250 arrayOop(obj)->set_length(0); 251 oop* old_p = set_partial_array_mask(old); 252 push_on_queue(old_p); 253 } else { 254 // No point in using the slower heap_region_containing() method, 255 // given that we know obj is in the heap. 256 _scanner.set_region(_g1h->heap_region_containing_raw(obj)); 257 obj->oop_iterate_backwards(&_scanner); 258 } 259 } else { 260 undo_allocation(alloc_purpose, obj_ptr, word_sz); 261 obj = forward_ptr; 262 } 263 return obj; 264 } 265 266 HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 267 HeapWord* obj = NULL; 268 size_t gclab_word_size = _g1h->desired_plab_sz(purpose); 269 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { 270 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); 271 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); 272 alloc_buf->retire(false /* end_of_gc */, false /* retain */); 273 274 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); 275 if (buf == NULL) { 276 return NULL; // Let caller handle allocation failure. 277 } 278 // Otherwise. 279 alloc_buf->set_word_size(gclab_word_size); 280 alloc_buf->set_buf(buf); 281 282 obj = alloc_buf->allocate(word_sz); 283 assert(obj != NULL, "buffer was definitely big enough..."); 284 } else { 285 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 286 } 287 return obj; 288 } 289 290 void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { 291 if (alloc_buffer(purpose)->contains(obj)) { 292 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1), 293 "should contain whole object"); 294 alloc_buffer(purpose)->undo_allocation(obj, word_sz); 295 } else { 296 CollectedHeap::fill_with_object(obj, word_sz); 297 add_to_undo_waste(word_sz); 298 } 299 } 300 301 HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) { 302 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); 303 if (obj != NULL) { 304 return obj; 305 } 306 return allocate_slow(purpose, word_sz); 307 } 308 309 void G1ParScanThreadState::retire_alloc_buffers() { 310 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 311 size_t waste = _alloc_buffers[ap]->words_remaining(); 312 add_to_alloc_buffer_waste(waste); 313 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), 314 true /* end_of_gc */, 315 false /* retain */); 316 } 317 } 318 319 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) { 320 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)), 321 "Reference should not be NULL here as such are never pushed to the task queue."); 322 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 323 324 // Although we never intentionally push references outside of the collection 325 // set, due to (benign) races in the claim mechanism during RSet scanning more 326 // than one thread might claim the same card. So the same card may be 327 // processed multiple times. So redo this check. 328 if (_g1h->in_cset_fast_test(obj)) { 329 oop forwardee; 330 if (obj->is_forwarded()) { 331 forwardee = obj->forwardee(); 332 } else { 333 forwardee = copy_to_survivor_space(obj); 334 } 335 assert(forwardee != NULL, "forwardee should not be NULL"); 336 oopDesc::encode_store_heap_oop(p, forwardee); 337 } 338 339 assert(obj != NULL, "Must be"); 340 update_rs(from, p, queue_num()); 341 } 342 343 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) { 344 assert(has_partial_array_mask(p), "invariant"); 345 oop from_obj = clear_partial_array_mask(p); 346 347 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap."); 348 assert(from_obj->is_objArray(), "must be obj array"); 349 objArrayOop from_obj_array = objArrayOop(from_obj); 350 // The from-space object contains the real length. 351 int length = from_obj_array->length(); 352 353 assert(from_obj->is_forwarded(), "must be forwarded"); 354 oop to_obj = from_obj->forwardee(); 355 assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); 356 objArrayOop to_obj_array = objArrayOop(to_obj); 357 // We keep track of the next start index in the length field of the 358 // to-space object. 359 int next_index = to_obj_array->length(); 360 assert(0 <= next_index && next_index < length, 361 err_msg("invariant, next index: %d, length: %d", next_index, length)); 362 363 int start = next_index; 364 int end = length; 365 int remainder = end - start; 366 // We'll try not to push a range that's smaller than ParGCArrayScanChunk. 367 if (remainder > 2 * ParGCArrayScanChunk) { 368 end = start + ParGCArrayScanChunk; 369 to_obj_array->set_length(end); 370 // Push the remainder before we process the range in case another 371 // worker has run out of things to do and can steal it. 372 oop* from_obj_p = set_partial_array_mask(from_obj); 373 push_on_queue(from_obj_p); 374 } else { 375 assert(length == end, "sanity"); 376 // We'll process the final range for this object. Restore the length 377 // so that the heap remains parsable in case of evacuation failure. 378 to_obj_array->set_length(end); 379 } 380 _scanner.set_region(_g1h->heap_region_containing_raw(to_obj)); 381 // Process indexes [start,end). It will also process the header 382 // along with the first chunk (i.e., the chunk with start == 0). 383 // Note that at this point the length field of to_obj_array is not 384 // correct given that we are using it to keep track of the next 385 // start index. oop_iterate_range() (thankfully!) ignores the length 386 // field and only relies on the start / end parameters. It does 387 // however return the size of the object which will be incorrect. So 388 // we have to ignore it even if we wanted to use it. 389 to_obj_array->oop_iterate_range(&_scanner, start, end); 390 } 391 392 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { 393 if (!has_partial_array_mask(ref_to_scan)) { 394 // Note: we can use "raw" versions of "region_containing" because 395 // "obj_to_scan" is definitely in the heap, and is not in a 396 // humongous region. 397 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); 398 do_oop_evac(ref_to_scan, r); 399 } else { 400 do_oop_partial_array((oop*)ref_to_scan); 401 } 402 } 403 404 inline void G1ParScanThreadState::deal_with_reference(StarTask ref) { 405 assert(verify_task(ref), "sanity"); 406 if (ref.is_narrow()) { 407 deal_with_reference((narrowOop*)ref); 408 } else { 409 deal_with_reference((oop*)ref); 410 } 411 }