/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

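// Per-worker-thread state used during evacuation: the reference task queue,
// PLAB allocator, age table and surviving-young-words statistics owned by a
// single GC worker.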
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length)
{
  // We allocate young_cset_length plus one entries, since we "sacrifice"
  // entry 0 to keep track of surviving bytes for non-young regions
  // (where the age is -1). We also add a few elements at the beginning
  // and at the end in an attempt to eliminate cache contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL) {
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  }
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[InCSetState::Young]        = InCSetState::Old;
  _dest[InCSetState::Old]          = InCSetState::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = NEW_C_HEAP_ARRAY(G1OopStarChunkedList, _num_optional_regions, mtGC);
  for (size_t i = 0; i < _num_optional_regions; i++) {
    ::new (_oops_into_optional_regions + i) G1OopStarChunkedList();
  }
}

// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _dcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->g1_policy()->record_age_table(&_age_table);

  uint length = _g1h->collection_set()->young_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

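// Record how much memory the optional-region chunk lists used and release
// all C-heap storage owned by this state.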
G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  size_t used_by_optional = 0;
  for (size_t i = 0; i < _num_optional_regions; i++) {
    used_by_optional += _oops_into_optional_regions[i].free_chunk_lists();
  }
  _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::OptScanRS, _worker_id, used_by_optional, G1GCPhaseTimes::OptCSetUsedMemory);
  FREE_C_HEAP_ARRAY(G1OopStarChunkedList, _oops_into_optional_regions);
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
  _plab_allocator->waste(wasted, undo_wasted);
}

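// Debug-only verification that queue entries reference valid heap locations.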
#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

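// Fully drain this worker's task queue. Scanning a popped entry can push
// new entries, so keep trimming until the queue stays empty.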
void G1ParScanThreadState::trim_queue() {
  do {
    trim_queue_to_threshold(0);
  } while (!_refs->is_empty());
}

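// Attempt to allocate word_sz words in the space objects in *dest are
// promoted to next (currently only survivor -> old). On success *dest is
// updated; on PLAB refill failure the exhausted space is marked full so
// later copies do not retry it. Returns NULL if there is no space to try.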
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    // no other space to try.
    return NULL;
  }
}

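// Determine the destination state for an object currently in `state`:
// young objects below the tenuring threshold stay young, everything else
// moves to the state recorded in _dest. The object's age is returned
// through `age`.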
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}

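// Report a promotion event to the GC tracer (surfaced, e.g., through JFR),
// distinguishing copies into the current PLAB from direct allocations
// outside it.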
void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}

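// Copy `old` into to-space. The protocol, roughly:
//   1. allocate space in the destination (PLAB fast path first),
//   2. claim the object with an atomic forward-to; if another worker wins
//      the race, undo our allocation and return the winner's copy,
//   3. copy the contents, fix up the mark word and age, and arrange for
//      the new object's fields to be scanned.
// Any allocation failure funnels into handle_evacuation_failure_par().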
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert((from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant");

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark_raw(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      do_oop_partial_array(old_p);
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_state.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return forward_ptr;
  }
}

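// Lazily create the per-worker state the first time a worker asks for it,
// e.g. from a worker task (sketch; names are illustrative):
//   G1ParScanThreadState* pss = per_thread_states->state_for_worker(worker_id);
//   pss->trim_queue();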
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

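// Only valid once flush() has accumulated the per-worker counts.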
const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

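// Merge all per-worker statistics into global state and dispose of the
// per-worker states. Workers that never ran still have NULL slots and are
// skipped.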
void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    pss->flush(_surviving_young_words_total);
    delete pss;
    _states[worker_index] = NULL;
  }
  _flushed = true;
}

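// Evacuation of `old` failed; try to install a self-forwarding pointer.
// The winner of that race keeps the object in place, marks the region as
// failed, preserves the mark word and pushes the object's fields for
// scanning; losers return whatever forwarding the winner installed.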
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}
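
// One lazily created G1ParScanThreadState per worker, plus the array into
// which the per-worker surviving-young-words counts are accumulated.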
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}