< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

Print this page
rev 48920 : [backport] Use PLAB for evacuations instead of TLAB


   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "gc/shared/markBitMap.inline.hpp"

  29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  30 #include "gc/shared/suspendibleThreadSet.hpp"
  31 #include "gc/shenandoah/brooksPointer.inline.hpp"
  32 #include "gc/shenandoah/shenandoahAsserts.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahControlThread.hpp"
  37 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  41 #include "gc/shenandoah/shenandoahUtils.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/interfaceSupport.hpp"
  45 #include "runtime/prefetch.hpp"
  46 #include "runtime/prefetch.inline.hpp"
  47 #include "runtime/thread.hpp"
  48 #include "utilities/copy.hpp"


 236     jbyte prev = _cancelled_concgc.cmpxchg(CANCELLED, CANCELLABLE);
 237     if (prev == CANCELLABLE) return true;
 238     else if (prev == CANCELLED) return false;
 239     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
 240     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
 241     {
 242       // We need to provide a safepoint here, otherwise we might
 243       // spin forever if a SP is pending.
 244       ThreadBlockInVM sp(JavaThread::current());
 245       SpinPause();
 246     }
 247   }
 248 }
 249 
 // Re-arm GC cancellation: return the flag to CANCELLABLE so a future cycle
 // can be cancelled again, and reset the evacuation-OOM protocol state left
 // over from the cancelled cycle.
 250 inline void ShenandoahHeap::clear_cancelled_concgc() {
 251   _cancelled_concgc.set(CANCELLABLE);
 252   _oom_evac_handler.clear();
 253 }
 254 
 // Try to allocate "size" heap words for an evacuation copy out of the current
 // thread's GCLAB.  Returns NULL when GCLAB allocation is unavailable, which
 // signals the caller to fall back to shared heap allocation.
 255 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 256   if (UseTLAB) {
       // Only non-Java, non-worker threads are expected to lack a GCLAB;
       // anything else hitting this path is a performance bug, not an error.
 257     if (!thread->gclab().is_initialized()) {
 258       assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 259              "Performance: thread should have GCLAB: %s", thread->name());
 260       // No GCLABs in this thread, fallback to shared allocation
 261       return NULL;
 262     }
       // Fast path: bump-pointer allocation within the current GCLAB.
 263     HeapWord* obj = thread->gclab().allocate(size);
 264     if (obj != NULL) {
 265       return obj;
 266     }
       // GCLAB exhausted; take the slow path (presumably retires the current
       // buffer and acquires a new one — confirm in allocate_from_gclab_slow).
 267     // Otherwise...
 268     return allocate_from_gclab_slow(thread, size);
 269   } else {
       // GCLABs are tied to TLAB support; with -UseTLAB always use shared allocation.
 270     return NULL;
 271   }
 272 }
 273 
 274 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
 275   if (Thread::current()->is_oom_during_evac()) {
 276     // This thread went through the OOM during evac protocol and it is safe to return
 277     // the forward pointer. It must not attempt to evacuate any more.
 278     return ShenandoahBarrierSet::resolve_forwarded(p);
 279   }
 280 
 281   size_t size_no_fwdptr = (size_t) p->size();
 282   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 283 
 284   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 285 
 286   bool alloc_from_gclab = true;
 287   HeapWord* filler;
 288 #ifdef ASSERT
 289 
 290   assert(thread->is_evac_allowed(), "must be enclosed in ShenandoahOOMDuringEvacHandler");
 291 


 332 
 333 #ifdef ASSERT
 334     assert(oopDesc::is_oop(copy_val), "expect oop");
 335     assert(p->klass() == copy_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
 336                                               p2i(p), p2i(copy));
 337 #endif
 338     return copy_val;
 339   }  else {
 340     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 341     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 342     // But if it happens to contain references to evacuated regions, those references would
 343     // not get updated for this stale copy during this cycle, and we will crash while scanning
 344     // it the next cycle.
 345     //
 346     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 347     // object will overwrite this stale copy, or the filler object on LAB retirement will
 348     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 349     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 350     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 351     if (alloc_from_gclab) {
 352       thread->gclab().rollback(size_with_fwdptr);
 353     } else {
 354       fill_with_object(copy, size_no_fwdptr);
 355     }
 356     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
 357                                       p2i(p), p2i(copy), p2i(result));
 358     return result;
 359   }
 360 }
 361 
 // Returns true iff the given heap address is not yet marked in the "next"
 // marking bitmap, i.e. still needs to be marked by the current marking pass.
 362 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 363   return ! is_marked_next(oop(entry));
 364 }
 365 
 // Returns true iff the region with the given index is part of the current
 // collection set.  A collection set must already be installed.
 366 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 367   assert(collection_set() != NULL, "Sanity");
 368   return collection_set()->is_in(region_index);
 369 }
 370 
 371 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 372   return region_in_collection_set(r->region_number());




   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "gc/shared/markBitMap.inline.hpp"
  29 #include "gc/shared/plab.hpp"
  30 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  31 #include "gc/shared/suspendibleThreadSet.hpp"
  32 #include "gc/shenandoah/brooksPointer.inline.hpp"
  33 #include "gc/shenandoah/shenandoahAsserts.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  37 #include "gc/shenandoah/shenandoahControlThread.hpp"
  38 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/atomic.hpp"
  45 #include "runtime/interfaceSupport.hpp"
  46 #include "runtime/prefetch.hpp"
  47 #include "runtime/prefetch.inline.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/copy.hpp"


 237     jbyte prev = _cancelled_concgc.cmpxchg(CANCELLED, CANCELLABLE);
 238     if (prev == CANCELLABLE) return true;
 239     else if (prev == CANCELLED) return false;
 240     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
 241     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
 242     {
 243       // We need to provide a safepoint here, otherwise we might
 244       // spin forever if a SP is pending.
 245       ThreadBlockInVM sp(JavaThread::current());
 246       SpinPause();
 247     }
 248   }
 249 }
 250 
 // Re-arm GC cancellation: return the flag to CANCELLABLE so a future cycle
 // can be cancelled again, and reset the evacuation-OOM protocol state left
 // over from the cancelled cycle.
 251 inline void ShenandoahHeap::clear_cancelled_concgc() {
 252   _cancelled_concgc.set(CANCELLABLE);
 253   _oom_evac_handler.clear();
 254 }
 255 
 // Try to allocate "size" heap words for an evacuation copy out of the current
 // thread's GCLAB (now a PLAB, per the backport).  Returns NULL when GCLAB
 // allocation is unavailable, signalling fallback to shared heap allocation.
 256 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 257   PLAB* gclab = thread->gclab();
     // A NULL PLAB replaces the old is_initialized() check; presumably it also
     // covers the -UseTLAB case — confirm against thread GCLAB setup.
 258   if (gclab == NULL) {
 259     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 260            "Performance: thread should have GCLAB: %s", thread->name());
 261     // No GCLABs in this thread, fallback to shared allocation
 262     return NULL;
 263   }
     // Fast path: bump-pointer allocation within the current PLAB.
 264   HeapWord* obj = gclab->allocate(size);
 265   if (obj != NULL) {
 266     return obj;
 267   }
     // PLAB exhausted; take the slow path (presumably retires the current
     // buffer and acquires a new one — confirm in allocate_from_gclab_slow).
 268   // Otherwise...
 269   return allocate_from_gclab_slow(thread, size);



 270 }
 271 
 272 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
 273   if (Thread::current()->is_oom_during_evac()) {
 274     // This thread went through the OOM during evac protocol and it is safe to return
 275     // the forward pointer. It must not attempt to evacuate any more.
 276     return ShenandoahBarrierSet::resolve_forwarded(p);
 277   }
 278 
 279   size_t size_no_fwdptr = (size_t) p->size();
 280   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 281 
 282   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 283 
 284   bool alloc_from_gclab = true;
 285   HeapWord* filler;
 286 #ifdef ASSERT
 287 
 288   assert(thread->is_evac_allowed(), "must be enclosed in ShenandoahOOMDuringEvacHandler");
 289 


 330 
 331 #ifdef ASSERT
 332     assert(oopDesc::is_oop(copy_val), "expect oop");
 333     assert(p->klass() == copy_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
 334                                               p2i(p), p2i(copy));
 335 #endif
 336     return copy_val;
 337   }  else {
 338     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 339     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 340     // But if it happens to contain references to evacuated regions, those references would
 341     // not get updated for this stale copy during this cycle, and we will crash while scanning
 342     // it the next cycle.
 343     //
 344     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 345     // object will overwrite this stale copy, or the filler object on LAB retirement will
 346     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 347     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 348     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 349     if (alloc_from_gclab) {
 350       thread->gclab()->undo_allocation(filler, size_with_fwdptr);
 351     } else {
 352       fill_with_object(copy, size_no_fwdptr);
 353     }
 354     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
 355                                       p2i(p), p2i(copy), p2i(result));
 356     return result;
 357   }
 358 }
 359 
 // Returns true iff the given heap address is not yet marked in the "next"
 // marking bitmap, i.e. still needs to be marked by the current marking pass.
 360 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 361   return ! is_marked_next(oop(entry));
 362 }
 363 
 // Returns true iff the region with the given index is part of the current
 // collection set.  A collection set must already be installed.
 364 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 365   assert(collection_set() != NULL, "Sanity");
 366   return collection_set()->is_in(region_index);
 367 }
 368 
 369 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 370   return region_in_collection_set(r->region_number());


< prev index next >