/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;

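// One-time setup, done at VM initialization: cache the old generation and
// to-space, allocate the padded manager array (one manager per GC worker
// thread plus one for the VMThread), and register each worker's depth-first
// queue with the shared queue set used for work stealing.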
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
}

// Helper functions to get around the circular dependency between
// psScavenge.inline.hpp and psPromotionManager.inline.hpp.
bool PSPromotionManager::should_scavenge(oop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}
bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}

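// Per-scavenge setup: re-read to_space (it may have changed since the last
// collection) and reset every manager, including the VMThread's.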
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}

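// Per-scavenge teardown. All depth-first stacks must be empty by now; report
// any promotion failures to the GC tracer and flush each manager's LABs.
// Returns true if any manager recorded a promotion failure.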
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  return promotion_failure_occurred;
}

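// Task queue statistics, compiled in only when TASKQUEUE_STATS is enabled:
// per-thread counters for masked (chunked-array) pushes and steals, plus the
// generic queue statistics kept by the task queues themselves.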
#if TASKQUEUE_STATS
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
  out->print_cr("== GC Tasks Stats, GC %3u",
                Universe::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3u ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

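// Per-manager setup: point the old LAB at the old generation's start array,
// size the depth-first queue, and derive the drain target. With a single GC
// thread (or a zero GCDrainStackTargetSize) the queue is always drained
// completely, since no other thread will steal from it.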
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

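// Make this manager ready for the next scavenge: point both LABs at empty
// regions (they are filled lazily on first use), clear the generation-full
// flags, and reset the failure info and statistics.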
void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; that would only waste heap space.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}

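// Drain this manager's depth-first queue. The overflow stack is always
// emptied; the local queue is either emptied as well (totally_drain) or only
// drained down to _target_stack_size, leaving some work for other threads
// to steal.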
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain the overflow stack first, so other threads can steal from
    // the claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

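// Flush both LABs back to their spaces. A LAB may already be flushed if its
// generation filled up during the scavenge (it could not be refilled); the
// asserts check exactly that invariant.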
void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed()) {
    _young_lab.flush();
  }

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed()) {
    _old_lab.flush();
  }

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

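// Scan one chunk, [start, end), of the promoted object array 'obj', claiming
// or forwarding every element that still needs to be scavenged.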
template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

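// Process one chunk of a large object array. 'old' is the from-space copy;
// its length field is reused as a cursor. Each call peels _array_chunk_size
// elements off the end: if more than _min_array_size_for_chunking elements
// remain, the array is pushed back (masked) for further chunking, otherwise
// this is the last chunk and the correct length is restored from the
// forwardee.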
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

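// Closure used by the oop_ps_push_contents() variants below: each oop field
// that still needs scavenging is claimed or forwarded on this manager's
// depth-first queue.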
class PushContentsClosure : public ExtendedOopClosure {
  PSPromotionManager* _pm;
 public:
  PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}

  template <typename T> void do_oop_nv(T* p) {
    if (PSScavenge::should_scavenge(p)) {
      _pm->claim_or_forward_depth(p);
    }
  }

  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

  // Don't use the oop verification code in the oop_oop_iterate framework.
  debug_only(virtual bool should_verify_oops() { return false; })
};

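// Push the oop fields of a regular instance, walking its oop maps in
// reverse order.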
void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  PushContentsClosure cl(pm);
  oop_oop_iterate_oop_maps_reverse<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  // Note that we don't have to follow the mirror -> klass pointer, since all
  // klasses that are dirty will be scavenged when we iterate over the
  // ClassLoaderData objects.

  InstanceKlass::oop_ps_push_contents(obj, pm);

  PushContentsClosure cl(pm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  InstanceKlass::oop_ps_push_contents(obj, pm);

  // This is called by the young collector. It will already have taken care of
  // all class loader data. So, we don't have to follow the class loader ->
  // class loader data link.
}

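// Reference objects need special treatment. If the referent still needs
// scavenging and the reference processor discovers the reference, the
// referent is left for the reference processor; otherwise the referent,
// and where applicable the discovered and next fields, are treated as
// ordinary oops.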
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference discovered by the reference processor; referent and next
      // will be traversed later
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
    T  next_oop = oopDesc::load_heap_oop(next_addr);
    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
      debug_only(
        if (TraceReferenceGC && PrintGCDetails) {
          gclog_or_tty->print_cr("   Process discovered as normal "
                                 PTR_FORMAT, p2i(discovered_addr));
        }
      )
      if (PSScavenge::should_scavenge(discovered_addr)) {
        pm->claim_or_forward_depth(discovered_addr);
      }
    }
  } else {
#ifdef ASSERT
    // In the case of older JDKs which do not use the discovered
    // field for the pending list, an inactive ref (next != NULL)
    // must always have a NULL discovered field.
    oop next = oopDesc::load_decode_heap_oop(next_addr);
    oop discovered = java_lang_ref_Reference::discovered(obj);
    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
                   p2i(obj)));
#endif
  }

  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
  }
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}

void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  if (UseCompressedOops) {
    oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
  } else {
    oop_ps_push_contents_specialized<oop>(obj, this, pm);
  }
}

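// Push every element of an object array. (Large arrays are chunked on the
// promotion path instead; see process_array_chunk() above.) Type arrays
// contain no oops, so pushing their contents should never be attempted.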
void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_objArray(), "obj must be obj array");
  PushContentsClosure cl(pm);
  oop_oop_iterate_elements<true>(objArrayOop(obj), &cl);
}

void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_typeArray(), "must be a type array");
  ShouldNotReachHere();
}

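// Promotion of obj failed: try to install a self-forwarding pointer with a
// CAS. The winner of any race keeps the object in place, records the failure
// and pushes the object's contents for scanning; a loser simply returns the
// forwardee installed by the winner.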
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started.  If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    push_contents(obj);

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost; someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifndef PRODUCT
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
                           "promotion-failure",
                           obj->klass()->internal_name(),
                           p2i(obj), obj->size());
  }
#endif

  return obj;
}