
src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp

rev 49852 : imported patch 8191471-g1-retained-mutator-region


  37   if (pointer_delta(end(), obj) >= size) {
  38     // successful thread-local allocation
  39 #ifdef ASSERT
  40     // Skip mangling the space corresponding to the object header to
  41     // ensure that the returned space is not considered parsable by
  42     // any concurrent GC thread.
  43     size_t hdr_size = oopDesc::header_size();
  44     Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
  45 #endif // ASSERT
  46     // This addition is safe because we know that top is
  47     // at least size below end, so the add can't wrap.
  48     set_top(obj + size);
  49 
  50     invariants();
  51     return obj;
  52   }
  53   return NULL;
  54 }
  55 
  56 inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
  57   const size_t aligned_obj_size = align_object_size(obj_size);
  58 
  59   // Compute the size for the new TLAB.
  60   // The "last" tlab may be smaller to reduce fragmentation.
  61   // unsafe_max_tlab_alloc is just a hint.
  62   const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) /
  63                                                   HeapWordSize;
  64   size_t new_tlab_size = MIN3(available_size, desired_size() + aligned_obj_size, max_size());
  65 
  66   // Make sure there's enough room for object and filler int[].
  67   const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve();
  68   if (new_tlab_size < obj_plus_filler_size) {
  69     // If there isn't enough room for the allocation, return failure.
  70     log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns failure",
  71                         obj_size);
  72     return 0;
  73   }
  74   log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns " SIZE_FORMAT,
  75                       obj_size, new_tlab_size);
  76   return new_tlab_size;
  77 }
  78 




  79 
  80 void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) {
  81   // Raise size required to bypass TLAB next time. Why? Else there's
  82   // a risk that a thread that repeatedly allocates objects of one
  83   // size will get stuck on this slow path.
  84 
  85   set_refill_waste_limit(refill_waste_limit() + refill_waste_limit_increment());
  86 
  87   _slow_allocations++;
  88 
  89   log_develop_trace(gc, tlab)("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
  90                               " obj: " SIZE_FORMAT
  91                               " free: " SIZE_FORMAT
  92                               " waste: " SIZE_FORMAT,
  93                               "slow", p2i(myThread()), myThread()->osthread()->thread_id(),
  94                               obj_size, free(), refill_waste_limit());
  95 }
  96 
  97 #endif // SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP
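(Reviewer's aside between the two frames: the fast path in allocate() above is plain bump-pointer allocation. Below is a self-contained model of the same control flow; Tlab, kHeaderWords and kBadHeapWord are invented stand-ins for the HotSpot types it elides, so treat it as a sketch, not the real implementation.)

    #include <cstddef>
    #include <cstdint>

    typedef uint64_t HeapWord;                   // stand-in for HotSpot's HeapWord
    const size_t   kHeaderWords = 2;             // assumed object header size, in words
    const HeapWord kBadHeapWord = 0xBAADBABEull; // mangle pattern, like badHeapWordVal

    struct Tlab {
      HeapWord* _top;
      HeapWord* _end;

      HeapWord* allocate(size_t size) {
        HeapWord* obj = _top;
        // pointer_delta(end(), obj) >= size: check the remaining words
        // first, so the later obj + size can neither pass _end nor wrap.
        if (static_cast<size_t>(_end - obj) >= size) {
    #ifdef ASSERT
          // Mangle the body but skip the header words, so a concurrent
          // GC thread never mistakes the region for a parsable object.
          for (size_t i = kHeaderWords; i < size; i++) {
            obj[i] = kBadHeapWord;
          }
    #endif
          _top = obj + size;  // safe: top is at least size below end
          return obj;
        }
        return nullptr;       // caller falls back to the slow path
      }
    };
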


  37   if (pointer_delta(end(), obj) >= size) {
  38     // successful thread-local allocation
  39 #ifdef ASSERT
  40     // Skip mangling the space corresponding to the object header to
  41     // ensure that the returned space is not considered parsable by
  42     // any concurrent GC thread.
  43     size_t hdr_size = oopDesc::header_size();
  44     Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
  45 #endif // ASSERT
  46     // This addition is safe because we know that top is
  47     // at least size below end, so the add can't wrap.
  48     set_top(obj + size);
  49 
  50     invariants();
  51     return obj;
  52   }
  53   return NULL;
  54 }
  55 
  56 inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {


  57   // Compute the size for the new TLAB.
  58   // The "last" tlab may be smaller to reduce fragmentation.
  59   // unsafe_max_tlab_alloc is just a hint.
  60   const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) /
  61                                                   HeapWordSize;
  62   size_t new_tlab_size = MIN3(available_size, desired_size() + align_object_size(obj_size), max_size());
  63 
  64   // Make sure there's enough room for object and filler int[].
  65   if (new_tlab_size < compute_min_size(obj_size)) {

  66     // If there isn't enough room for the allocation, return failure.
  67     log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns failure",
  68                         obj_size);
  69     return 0;
  70   }
  71   log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns " SIZE_FORMAT,
  72                       obj_size, new_tlab_size);
  73   return new_tlab_size;
  74 }
  75 
  76 inline size_t ThreadLocalAllocBuffer::compute_min_size(size_t obj_size) {
  77   const size_t aligned_obj_size = align_object_size(obj_size);
  78   return aligned_obj_size + alignment_reserve();
  79 }
  80 
  81 void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) {
  82   // Raise size required to bypass TLAB next time. Why? Else there's
  83   // a risk that a thread that repeatedly allocates objects of one
  84   // size will get stuck on this slow path.
  85 
  86   set_refill_waste_limit(refill_waste_limit() + refill_waste_limit_increment());
  87 
  88   _slow_allocations++;
  89 
  90   log_develop_trace(gc, tlab)("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
  91                               " obj: " SIZE_FORMAT
  92                               " free: " SIZE_FORMAT
  93                               " waste: " SIZE_FORMAT,
  94                               "slow", p2i(myThread()), myThread()->osthread()->thread_id(),
  95                               obj_size, free(), refill_waste_limit());
  96 }
  97 
  98 #endif // SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP
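
(Reviewer's aside: the functional change in this file is small. The object-plus-filler minimum moves out of compute_size() into the new compute_min_size(), so a caller can test whether an allocation can fit at all without computing a full TLAB size, presumably for the G1 retained-region path named in the patch title. Below is a standalone sketch of the post-patch sizing policy; the constants and sample numbers are illustrative, std::min stands in for MIN3, and the alignment helper is simplified.)

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    const size_t kAlignmentReserve = 2;  // room for the retiring filler int[], in words (illustrative)

    size_t align_object_size(size_t words) {
      return words;  // assume 1-word object alignment for this sketch
    }

    // Mirrors the new compute_min_size(): the smallest buffer that can
    // hold the object plus the filler that keeps a retired TLAB parsable.
    size_t compute_min_size(size_t obj_size) {
      return align_object_size(obj_size) + kAlignmentReserve;
    }

    size_t compute_size(size_t obj_size, size_t available, size_t desired, size_t max) {
      // Clamp to the smallest of: what the heap can hand out, the adaptive
      // desired size grown by this allocation, and the hard cap.
      size_t new_tlab_size = std::min({available,
                                       desired + align_object_size(obj_size),
                                       max});
      if (new_tlab_size < compute_min_size(obj_size)) {
        return 0;  // failure, as in the original
      }
      return new_tlab_size;
    }

    int main() {
      // Roomy heap: result is desired + aligned object size.
      std::printf("%zu\n", compute_size(4, 1u << 20, 2048, 1u << 16));  // 2052
      // Nearly exhausted heap: below compute_min_size, so sizing fails.
      std::printf("%zu\n", compute_size(4, 4, 2048, 1u << 16));         // 0
    }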