< prev index next >

src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp

Print this page




  72 
  73   print_stats("gc");
  74 
  75   if (_number_of_refills > 0) {
  76     // Update allocation history if a reasonable amount of eden was allocated.
  77     bool update_allocation_history = used > 0.5 * capacity;
  78 
  79     if (update_allocation_history) {
  80       // Average the fraction of eden allocated in a tlab by this
  81       // thread for use in the next resize operation.
  82       // _gc_waste is not subtracted because it's included in
  83       // "used".
  84       // The result can be larger than 1.0 due to direct to old allocations.
  85       // These allocations should ideally not be counted but since it is not possible
  86       // to filter them out here we just cap the fraction to be at most 1.0.
  87       double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used);
  88       _allocation_fraction.sample(alloc_frac);
  89     }
  90     global_stats()->update_allocating_threads();
  91     global_stats()->update_number_of_refills(_number_of_refills);
  92     global_stats()->update_allocation(_number_of_refills * desired_size());
  93     global_stats()->update_gc_waste(_gc_waste);
  94     global_stats()->update_slow_refill_waste(_slow_refill_waste);
  95     global_stats()->update_fast_refill_waste(_fast_refill_waste);
  96 
  97   } else {
  98     assert(_number_of_refills == 0 && _fast_refill_waste == 0 &&
  99            _slow_refill_waste == 0 && _gc_waste          == 0,
 100            "tlab stats == 0");
 101   }
 102   global_stats()->update_slow_allocations(_slow_allocations);
 103 }
 104 
 105 // Fills the current tlab with a dummy filler array to create
 106 // an illusion of a contiguous Eden and optionally retires the tlab.
 107 // Waste accounting should be done in caller as appropriate; see,
 108 // for example, clear_before_allocation().
 109 void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) {
 110   if (end() != NULL) {
 111     invariants();
 112 


 145 
 146   new_size = MIN2(MAX2(new_size, min_size()), max_size());
 147 
 148   size_t aligned_new_size = align_object_size(new_size);
 149 
 150   log_trace(gc, tlab)("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
 151                       " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT,
 152                       p2i(myThread()), myThread()->osthread()->thread_id(),
 153                       _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
 154 
 155   set_desired_size(aligned_new_size);
 156   set_refill_waste_limit(initial_refill_waste_limit());
 157 }
 158 
 159 void ThreadLocalAllocBuffer::initialize_statistics() {
 160     _number_of_refills = 0;
 161     _fast_refill_waste = 0;
 162     _slow_refill_waste = 0;
 163     _gc_waste          = 0;
 164     _slow_allocations  = 0;

 165 }
 166 
 167 void ThreadLocalAllocBuffer::fill(HeapWord* start,
 168                                   HeapWord* top,
 169                                   size_t    new_size) {
 170   _number_of_refills++;

 171   print_stats("fill");
 172   assert(top <= start + new_size - alignment_reserve(), "size too small");
 173   initialize(start, top, start + new_size - alignment_reserve());
 174 
 175   // Reset amount of internal fragmentation
 176   set_refill_waste_limit(initial_refill_waste_limit());
 177 }
 178 
// Set the TLAB bounds to [start, end) with the current allocation
// position at 'top', then sanity-check the result.
void ThreadLocalAllocBuffer::initialize(HeapWord* start,
                                        HeapWord* top,
                                        HeapWord* end) {
  set_start(start);
  set_top(top);
  set_pf_top(top);  // presumably the prefetch watermark; reset to the allocation point
  set_end(end);
  invariants();     // verify the newly installed bounds are consistent
}
 188 
 189 void ThreadLocalAllocBuffer::initialize() {
 190   initialize(NULL,                    // start


 257   } else if (global_stats() != NULL) {
 258     // Initial size is a function of the average number of allocating threads.
 259     unsigned nof_threads = global_stats()->allocating_threads_avg();
 260 
 261     init_sz  = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
 262                       (nof_threads * target_refills());
 263     init_sz = align_object_size(init_sz);
 264   }
 265   init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
 266   return init_sz;
 267 }
 268 
// Emit a trace-level unified-logging line summarizing this thread's TLAB
// activity (desired size, refills, waste breakdown). 'tag' identifies the
// call site (e.g. "gc", "fill").
void ThreadLocalAllocBuffer::print_stats(const char* tag) {
  Log(gc, tlab) log;
  if (!log.is_trace()) {
    return;  // skip the stat computations below when trace logging is off
  }

  Thread* thrd = myThread();
  // Total words not handed out to the application since the last reset.
  size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
  // Approximate total allocation: assumes every refill was of the
  // current desired size (desired size may have changed between refills).
  size_t alloc = _number_of_refills * _desired_size;
  double waste_percent = percent_of(waste, alloc);
  size_t tlab_used  = Universe::heap()->tlab_used(thrd);
  log.trace("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
            " desired_size: " SIZE_FORMAT "KB"
            " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
            " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB"
            " slow: %dB fast: %dB",
            tag, p2i(thrd), thrd->osthread()->thread_id(),
            _desired_size / (K / HeapWordSize),
            _slow_allocations, _refill_waste_limit * HeapWordSize,
            _allocation_fraction.average(),
            _allocation_fraction.average() * tlab_used / K,
            _number_of_refills, waste_percent,
            _gc_waste * HeapWordSize,
            _slow_refill_waste * HeapWordSize,
            _fast_refill_waste * HeapWordSize);
}
 295 
 296 void ThreadLocalAllocBuffer::verify() {
 297   HeapWord* p = start();
 298   HeapWord* t = top();




  72 
  73   print_stats("gc");
  74 
  75   if (_number_of_refills > 0) {
  76     // Update allocation history if a reasonable amount of eden was allocated.
  77     bool update_allocation_history = used > 0.5 * capacity;
  78 
  79     if (update_allocation_history) {
  80       // Average the fraction of eden allocated in a tlab by this
  81       // thread for use in the next resize operation.
  82       // _gc_waste is not subtracted because it's included in
  83       // "used".
  84       // The result can be larger than 1.0 due to direct to old allocations.
  85       // These allocations should ideally not be counted but since it is not possible
  86       // to filter them out here we just cap the fraction to be at most 1.0.
  87       double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used);
  88       _allocation_fraction.sample(alloc_frac);
  89     }
  90     global_stats()->update_allocating_threads();
  91     global_stats()->update_number_of_refills(_number_of_refills);
  92     global_stats()->update_allocation(_allocated_size);
  93     global_stats()->update_gc_waste(_gc_waste);
  94     global_stats()->update_slow_refill_waste(_slow_refill_waste);
  95     global_stats()->update_fast_refill_waste(_fast_refill_waste);
  96 
  97   } else {
  98     assert(_number_of_refills == 0 && _fast_refill_waste == 0 &&
  99            _slow_refill_waste == 0 && _gc_waste          == 0,
 100            "tlab stats == 0");
 101   }
 102   global_stats()->update_slow_allocations(_slow_allocations);
 103 }
 104 
 105 // Fills the current tlab with a dummy filler array to create
 106 // an illusion of a contiguous Eden and optionally retires the tlab.
 107 // Waste accounting should be done in caller as appropriate; see,
 108 // for example, clear_before_allocation().
 109 void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) {
 110   if (end() != NULL) {
 111     invariants();
 112 


 145 
 146   new_size = MIN2(MAX2(new_size, min_size()), max_size());
 147 
 148   size_t aligned_new_size = align_object_size(new_size);
 149 
 150   log_trace(gc, tlab)("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
 151                       " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT,
 152                       p2i(myThread()), myThread()->osthread()->thread_id(),
 153                       _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
 154 
 155   set_desired_size(aligned_new_size);
 156   set_refill_waste_limit(initial_refill_waste_limit());
 157 }
 158 
 159 void ThreadLocalAllocBuffer::initialize_statistics() {
 160     _number_of_refills = 0;
 161     _fast_refill_waste = 0;
 162     _slow_refill_waste = 0;
 163     _gc_waste          = 0;
 164     _slow_allocations  = 0;
 165     _allocated_size    = 0;
 166 }
 167 
 168 void ThreadLocalAllocBuffer::fill(HeapWord* start,
 169                                   HeapWord* top,
 170                                   size_t    new_size) {
 171   _number_of_refills++;
 172   _allocated_size += new_size;
 173   print_stats("fill");
 174   assert(top <= start + new_size - alignment_reserve(), "size too small");
 175   initialize(start, top, start + new_size - alignment_reserve());
 176 
 177   // Reset amount of internal fragmentation
 178   set_refill_waste_limit(initial_refill_waste_limit());
 179 }
 180 
// Set the TLAB bounds to [start, end) with the current allocation
// position at 'top', then sanity-check the result.
void ThreadLocalAllocBuffer::initialize(HeapWord* start,
                                        HeapWord* top,
                                        HeapWord* end) {
  set_start(start);
  set_top(top);
  set_pf_top(top);  // presumably the prefetch watermark; reset to the allocation point
  set_end(end);
  invariants();     // verify the newly installed bounds are consistent
}
 190 
 191 void ThreadLocalAllocBuffer::initialize() {
 192   initialize(NULL,                    // start


 259   } else if (global_stats() != NULL) {
 260     // Initial size is a function of the average number of allocating threads.
 261     unsigned nof_threads = global_stats()->allocating_threads_avg();
 262 
 263     init_sz  = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
 264                       (nof_threads * target_refills());
 265     init_sz = align_object_size(init_sz);
 266   }
 267   init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
 268   return init_sz;
 269 }
 270 
// Emit a trace-level unified-logging line summarizing this thread's TLAB
// activity (desired size, refills, waste breakdown). 'tag' identifies the
// call site (e.g. "gc", "fill").
void ThreadLocalAllocBuffer::print_stats(const char* tag) {
  Log(gc, tlab) log;
  if (!log.is_trace()) {
    return;  // skip the stat computations below when trace logging is off
  }

  Thread* thrd = myThread();
  // Total words not handed out to the application since the last reset.
  size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
  // Waste is computed against the exact cumulative refill size
  // (_allocated_size), not an estimate based on the desired size.
  double waste_percent = percent_of(waste, _allocated_size);
  size_t tlab_used  = Universe::heap()->tlab_used(thrd);
  log.trace("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
            " desired_size: " SIZE_FORMAT "KB"
            " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
            " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB"
            " slow: %dB fast: %dB",
            tag, p2i(thrd), thrd->osthread()->thread_id(),
            _desired_size / (K / HeapWordSize),
            _slow_allocations, _refill_waste_limit * HeapWordSize,
            _allocation_fraction.average(),
            _allocation_fraction.average() * tlab_used / K,
            _number_of_refills, waste_percent,
            _gc_waste * HeapWordSize,
            _slow_refill_waste * HeapWordSize,
            _fast_refill_waste * HeapWordSize);
}
 296 
 297 void ThreadLocalAllocBuffer::verify() {
 298   HeapWord* p = start();
 299   HeapWord* t = top();


< prev index next >