/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionLAB.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"

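// The manager array holds ParallelGCThreads + 1 entries; the extra slot at
// index ParallelGCThreads is used by the VM thread's promotion manager, which
// is why the assert below accepts index == ParallelGCThreads.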
inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

template <class T>
inline void PSPromotionManager::push_depth(T* p) {
  claimed_stack_depth()->push(p);
}

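// If the object referenced from p has already been forwarded, update the
// reference in place (card-marking the location when the forwardee is still
// young, so the old-to-young pointer is found by the next scavenge);
// otherwise defer the work by pushing the location onto the claimed stack.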
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");
  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
                                                      size_t obj_size,
                                                      uint age, bool tenured,
                                                      const PSPromotionLAB* lab) {
  // Skip if memory allocation failed
  if (new_obj != NULL) {
    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

    if (lab != NULL) {
      // Promotion of object through newly allocated PLAB
      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        size_t lab_size = lab->capacity();
        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
                                                      age, tenured, lab_size);
      }
    } else {
      // Promotion of object directly to heap
      if (gc_tracer->should_report_promotion_outside_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

inline void PSPromotionManager::push_contents(oop obj) {
  obj->ps_push_contents(this);
}
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
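// Rough outline of copy_to_survivor_space():
//  1. If the object is not yet forwarded and is below the tenuring threshold,
//     try to copy it into the young (to-space) PLAB, refilling the PLAB or
//     allocating directly in to-space as needed.
//  2. Otherwise (or if to-space is full), try the old-gen PLAB / old gen;
//     if that also fails, fall back to oop_promotion_failed().
//  3. Publish the copy by CAS-ing a forwarding pointer into the old mark word;
//     a losing thread discards its copy and uses the winner's forwardee.
//  4. The winner pushes the new object's contents (or, for large objArrays,
//     a masked reference processed later in chunks) for further scanning.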
template<bool promote_immediately>
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

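        // If no space could be obtained at all, oop_promotion_failed() (see
        // psPromotionManager.cpp) installs a self-forwarding pointer via CAS,
        // so every thread observes a consistent forwardee for the object.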
        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
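        // Push a masked reference to the old (from-space) array; when it is
        // popped, process_popped_location_depth() unmasks it and hands it to
        // process_array_chunk(), which scans the array a chunk at a time.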
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        push_contents(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space.  If it was directly allocated we cannot
      // deallocate it, so we have to test.  If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

  log_develop(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                            should_scavenge(&new_obj) ? "copying" : "tenuring",
                            new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());

  return new_obj;
}

// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T, bool promote_immediately>
inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = oopDesc::load_decode_heap_oop_not_null(p);
  oop new_obj = o->is_forwarded()
        ? o->forwardee()
        : copy_to_survivor_space<promote_immediately>(o);

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (o->is_forwarded()) {
    log_develop(gc, scavenge)("{forwarding %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                              new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  oopDesc::encode_store_heap_oop_not_null(p, new_obj);

  // We cannot mark without test, as some code passes us pointers
  // that are outside the heap. These pointers are either from roots
  // or from metadata.
  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
    if (PSScavenge::is_obj_in_young(new_obj)) {
      PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
    }
  }
}

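// A popped StarTask is one of three things: a masked chunked-array oop
// (a partially-scanned large objArray), a narrowOop* reference location, or
// a plain oop* reference location.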
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(p);
    } else {
      copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(p);
    }
  }
}

inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) {
  return stack_array_depth()->steal(queue_num, seed, t);
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP