
src/share/vm/gc/g1/g1Allocator.cpp

rev 8849 : imported patch 8003237-no-wait-for-free-list
rev 8850 : imported patch jon-fast-evac-failure
rev 8851 : imported patch bengt-jon-more-naming
rev 8854 : imported patch 8073013-add-detailed-information-about-plab-memory-usage
rev 8855 : imported patch jon-review-statistics
rev 8866 : imported patch 8067339-PLAB-reallocation-might-result-in-failure-to-allocate
rev 8867 : imported patch bengt-refactoring
rev 8868 : imported patch 8067336-allow-that-plab-allocations-at-end-of-regions-are-flexible
rev 8870 : [mq]: tom-review

--- old/src/share/vm/gc/g1/g1Allocator.cpp

 127 size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
 128   // Return the remaining space in the cur alloc region, but not less than
 129   // the min TLAB size.
 130 
 131   // Also, this value can be at most the humongous object threshold,
 132   // since we can't allow tlabs to grow big enough to accommodate
 133   // humongous objects.
 134 
 135   HeapRegion* hr = mutator_alloc_region(context)->get();
 136   size_t max_tlab = _g1h->max_tlab_size() * wordSize;
 137   if (hr == NULL) {
 138     return max_tlab;
 139   } else {
 140     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
 141   }
 142 }
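
The clamping above reduces to a single min/max expression: take the region's free space, raise it to MinTLABSize, then cap it at the humongous threshold. A standalone sketch of that arithmetic (the two word-size constants below are assumed values for illustration, not HotSpot's actual defaults):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Assumed stand-ins for MinTLABSize and max_tlab_size() * wordSize.
    static const size_t kMinTlabWords = 2 * 1024;
    static const size_t kMaxTlabWords = 128 * 1024;

    // Mirrors MIN2(MAX2(hr->free(), MinTLABSize), max_tlab): the region's
    // free space, clamped into [MinTLABSize, max_tlab].
    static size_t unsafe_max_tlab_alloc(size_t region_free_words) {
      return std::min(std::max(region_free_words, kMinTlabWords), kMaxTlabWords);
    }

    int main() {
      std::printf("%zu\n", unsafe_max_tlab_alloc(512));       // 2048: clamped up
      std::printf("%zu\n", unsafe_max_tlab_alloc(50 * 1024)); // 51200: in range
      std::printf("%zu\n", unsafe_max_tlab_alloc(1u << 20));  // 131072: clamped down
      return 0;
    }
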
 143 
 144 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
 145                                               size_t word_size,
 146                                               AllocationContext_t context) {
 147   switch (dest.value()) {
 148     case InCSetState::Young:
 149       return survivor_attempt_allocation(word_size, context);
 150     case InCSetState::Old:
 151       return old_attempt_allocation(word_size, context);
 152     default:
 153       ShouldNotReachHere();
 154       return NULL; // Keep some compilers happy
 155   }
 156 }
 157 
 158 bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
 159   return _survivor_is_full;
 160 }
 161 
 162 bool G1Allocator::old_is_full(AllocationContext_t context) const {
 163   return _old_is_full;
 164 }
 165 
 166 void G1Allocator::set_survivor_full(AllocationContext_t context) {
 167   _survivor_is_full = true;
 168 }
 169 
 170 void G1Allocator::set_old_full(AllocationContext_t context) {
 171   _old_is_full = true;
 172 }
 173 
 174 HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
 175                                                    AllocationContext_t context) {
 176   assert(!_g1h->is_humongous(word_size),
 177          "we should not be seeing humongous-size allocations in this path");
 178 
 179   HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
 180                                                                            false /* bot_updates */);
 181   if (result == NULL && !survivor_is_full(context)) {
 182     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 183     result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
 184                                                                           false /* bot_updates */);
 185     if (result == NULL) {
 186       set_survivor_full(context);
 187     }
 188   }
 189   if (result != NULL) {
 190     _g1h->dirty_young_block(result, word_size);
 191   }
 192   return result;
 193 }
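
survivor_attempt_allocation and old_attempt_allocation share one shape: a lock-free fast path, a single retry under FreeList_lock that may install a fresh region, and a sticky full flag so later failures skip the lock entirely. A generic sketch of that pattern, with std::mutex standing in for FreeList_lock and an invented MockRegion (illustrative only, not HotSpot's types):

    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    // Invented stand-in for a G1 allocation region: one lock-free slot plus
    // one locked refill before the region source is exhausted.
    struct MockRegion {
      int fast_slots = 1;
      int refills    = 1;
      void* attempt_allocation(size_t) {           // CAS bump-pointer analogue
        return fast_slots-- > 0 ? this : nullptr;
      }
      void* attempt_allocation_locked(size_t) {    // may install a new region
        return refills-- > 0 ? this : nullptr;
      }
    };

    // Shape shared by survivor_attempt_allocation and old_attempt_allocation.
    void* attempt_allocation(MockRegion* region, size_t word_size,
                             bool* is_full, std::mutex* free_list_lock) {
      void* result = region->attempt_allocation(word_size);
      if (result == nullptr && !*is_full) {
        std::lock_guard<std::mutex> x(*free_list_lock);
        result = region->attempt_allocation_locked(word_size);
        if (result == nullptr) {
          *is_full = true;   // later attempts fail without touching the lock
        }
      }
      return result;
    }

    int main() {
      MockRegion r;
      std::mutex lock;
      bool full = false;
      for (int i = 0; i < 3; i++) {
        std::printf("attempt %d -> %p (full: %d)\n",
                    i, attempt_allocation(&r, 8, &full, &lock), (int)full);
      }
      return 0;
    }
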
 194 
 195 HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
 196                                               AllocationContext_t context) {
 197   assert(!_g1h->is_humongous(word_size),
 198          "we should not be seeing humongous-size allocations in this path");
 199 
 200   HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
 201                                                                       true /* bot_updates */);
 202   if (result == NULL && !old_is_full(context)) {
 203     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 204     result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
 205                                                                      true /* bot_updates */);
 206     if (result == NULL) {
 207       set_old_full(context);
 208     }
 209   }
 210   return result;
 211 }
 212 
 213 void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
 214   _survivor_is_full = false;
 215   _old_is_full = false;
 216 }
 217 
 218 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
 219   _g1h(G1CollectedHeap::heap()),
 220   _allocator(allocator),
 221   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
 222   for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
 223     _direct_allocated[i] = 0;
 224   }
 225 }
 226 
 227 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
 228   return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
 229 }
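
In other words: retiring the current buffer is acceptable only if the requested allocation is smaller than ParallelGCBufferWastePct percent of a full buffer, with both sides kept in multiplied form to avoid integer-division rounding. A standalone sketch with concrete numbers (10 is assumed here as the flag's value):

    #include <cstddef>
    #include <cstdio>

    // Assumed stand-in for ParallelGCBufferWastePct.
    static const size_t kWastePct = 10;

    // Same integer comparison as may_throw_away_buffer.
    static bool may_throw_away_buffer(size_t allocation_word_sz,
                                      size_t buffer_size) {
      return allocation_word_sz * 100 < buffer_size * kWastePct;
    }

    int main() {
      // With a 4096-word PLAB and a 10% budget, requests under 409.6 words
      // justify retiring the current buffer for a fresh one:
      std::printf("%d\n", may_throw_away_buffer(300, 4096)); // 1: 30000 < 40960
      std::printf("%d\n", may_throw_away_buffer(500, 4096)); // 0: 50000 >= 40960
      return 0;
    }
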
 230 
 231 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
 232                                                        size_t word_sz,
 233                                                        AllocationContext_t context,
 234                                                        bool* plab_refill_failed) {
 235   size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
 236   size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
 237 
 238   // Only get a new PLAB if the allocation fits and it would not waste more than
 239   // ParallelGCBufferWastePct in the existing buffer.
 240   if ((required_in_plab <= plab_word_size) &&
 241     may_throw_away_buffer(required_in_plab, plab_word_size)) {
 242 
 243     G1PLAB* alloc_buf = alloc_buffer(dest, context);
 244     alloc_buf->retire();
 245 
 246     HeapWord* buf = _allocator->par_allocate_during_gc(dest, plab_word_size, context);
 247     if (buf != NULL) {
 248       // Refill succeeded: install the new buffer and allocate from it.
 249       alloc_buf->set_buf(buf, plab_word_size);
 250 
 251       HeapWord* const obj = alloc_buf->allocate(word_sz);
 252       assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate "
 253                                   SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
 254                                   word_sz, required_in_plab, plab_word_size));
 255       return obj;
 256     }
 257     // Otherwise the refill failed; record it for the caller.
 258     *plab_refill_failed = true;
 259   }
 260   // Try direct allocation.
 261   HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
 262   if (result != NULL) {
 263     _direct_allocated[dest.value()] += word_sz;
 264   }
 265   return result;
 266 }
 267 
 268 void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
 269   alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
 270 }

+++ new/src/share/vm/gc/g1/g1Allocator.cpp

 127 size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
 128   // Return the remaining space in the cur alloc region, but not less than
 129   // the min TLAB size.
 130 
 131   // Also, this value can be at most the humongous object threshold,
 132   // since we can't allow tlabs to grow big enough to accommodate
 133   // humongous objects.
 134 
 135   HeapRegion* hr = mutator_alloc_region(context)->get();
 136   size_t max_tlab = _g1h->max_tlab_size() * wordSize;
 137   if (hr == NULL) {
 138     return max_tlab;
 139   } else {
 140     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
 141   }
 142 }
 143 
 144 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
 145                                               size_t word_size,
 146                                               AllocationContext_t context) {
 147   size_t temp;
 148   return par_allocate_during_gc(dest, word_size, word_size, &temp, context);
 149 }
 150 
 151 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
 152                                               size_t min_word_size,
 153                                               size_t desired_word_size,
 154                                               size_t* actual_word_size,
 155                                               AllocationContext_t context) {
 156   switch (dest.value()) {
 157     case InCSetState::Young:
 158       return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
 159     case InCSetState::Old:
 160       return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
 161     default:
 162       ShouldNotReachHere();
 163       return NULL; // Keep some compilers happy
 164   }
 165 }
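
The extended overload turns a fixed-size request into a flexible one: the caller states the minimum it can accept and the size it would like, and actual_word_size reports what was handed out. A self-contained sketch of that contract against an invented bump-pointer region (all names and the layout here are illustrative, not HotSpot's):

    #include <cstddef>
    #include <cstdio>

    // Invented bump-pointer region standing in for a G1 allocation region;
    // word-granular, single-threaded, no BOT updates.
    struct Region {
      size_t used;
      size_t capacity;
    };

    // Flexible allocation: try for desired_word_size, accept anything down
    // to min_word_size, and report the grant through actual_word_size.
    static size_t* par_allocate(Region* r, size_t min_word_size,
                                size_t desired_word_size,
                                size_t* actual_word_size, size_t* backing) {
      size_t free_words = r->capacity - r->used;
      if (free_words < min_word_size) {
        return nullptr;                   // not even the minimum fits
      }
      *actual_word_size = free_words < desired_word_size ? free_words
                                                         : desired_word_size;
      size_t* p = backing + r->used;
      r->used += *actual_word_size;
      return p;
    }

    int main() {
      static size_t heap[1024];
      Region r = { 1000, 1024 };          // 24 words left in the region
      size_t got = 0;
      // A PLAB refill asking for 256 words but able to accept 16 receives
      // the region's 24-word tail instead of failing and wasting it.
      size_t* buf = par_allocate(&r, 16, 256, &got, heap);
      std::printf("%p got %zu words\n", (void*)buf, got);
      return 0;
    }
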
 166 
 167 bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
 168   return _survivor_is_full;
 169 }
 170 
 171 bool G1Allocator::old_is_full(AllocationContext_t context) const {
 172   return _old_is_full;
 173 }
 174 
 175 void G1Allocator::set_survivor_full(AllocationContext_t context) {
 176   _survivor_is_full = true;
 177 }
 178 
 179 void G1Allocator::set_old_full(AllocationContext_t context) {
 180   _old_is_full = true;
 181 }
 182 
 183 HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
 184                                                    size_t desired_word_size,
 185                                                    size_t* actual_word_size,
 186                                                    AllocationContext_t context) {
 187   assert(!_g1h->is_humongous(desired_word_size),
 188          "we should not be seeing humongous-size allocations in this path");
 189 
 190   HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
 191                                                                            desired_word_size,
 192                                                                            actual_word_size,
 193                                                                            false /* bot_updates */);
 194   if (result == NULL && !survivor_is_full(context)) {
 195     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 196     result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
 197                                                                           desired_word_size,
 198                                                                           actual_word_size,
 199                                                                           false /* bot_updates */);
 200     if (result == NULL) {
 201       set_survivor_full(context);
 202     }
 203   }
 204   if (result != NULL) {
 205     _g1h->dirty_young_block(result, *actual_word_size);
 206   }
 207   return result;
 208 }
 209 
 210 HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
 211                                               size_t desired_word_size,
 212                                               size_t* actual_word_size,
 213                                               AllocationContext_t context) {
 214   assert(!_g1h->is_humongous(desired_word_size),
 215          "we should not be seeing humongous-size allocations in this path");
 216 
 217   HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
 218                                                                       desired_word_size,
 219                                                                       actual_word_size,
 220                                                                       true /* bot_updates */);
 221   if (result == NULL && !old_is_full(context)) {
 222     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 223     result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
 224                                                                      desired_word_size,
 225                                                                      actual_word_size,
 226                                                                      true /* bot_updates */);
 227     if (result == NULL) {
 228       set_old_full(context);
 229     }
 230   }
 231   return result;
 232 }
 233 
 234 void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
 235   _survivor_is_full = false;
 236   _old_is_full = false;
 237 }
 238 
 239 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
 240   _g1h(G1CollectedHeap::heap()),
 241   _allocator(allocator),
 242   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
 243   for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
 244     _direct_allocated[i] = 0;
 245   }
 246 }
 247 
 248 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
 249   return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
 250 }
 251 
 252 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
 253                                                        size_t word_sz,
 254                                                        AllocationContext_t context,
 255                                                        bool* plab_refill_failed) {
 256   size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
 257   size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
 258 
 259   // Only get a new PLAB if the allocation fits and it would not waste more than
 260   // ParallelGCBufferWastePct in the existing buffer.
 261   if ((required_in_plab <= plab_word_size) &&
 262     may_throw_away_buffer(required_in_plab, plab_word_size)) {
 263 
 264     G1PLAB* alloc_buf = alloc_buffer(dest, context);
 265     alloc_buf->retire();
 266 
 267     size_t actual_plab_size = 0;
 268     HeapWord* buf = _allocator->par_allocate_during_gc(dest,
 269                                                        required_in_plab,
 270                                                        plab_word_size,
 271                                                        &actual_plab_size,
 272                                                        context);
 273     if (buf != NULL) {
 274       // Refill succeeded: install the new buffer and allocate from it.
 275       alloc_buf->set_buf(buf, actual_plab_size);
 276 
 277       HeapWord* const obj = alloc_buf->allocate(word_sz);
 278       assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate "
 279                                   SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
 280                                   word_sz, required_in_plab, plab_word_size));
 281       return obj;
 282     }
 283     // Otherwise the refill failed; record it for the caller.
 284     *plab_refill_failed = true;
 285   }
 286   // Try direct allocation.
 287   HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
 288   if (result != NULL) {
 289     _direct_allocated[dest.value()] += word_sz;
 290   }
 291   return result;
 292 }
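
Taken together, the refill-versus-direct decision is: refill only when a fresh PLAB could hold the allocation and the request is small enough relative to a full buffer that retiring the old one stays within the waste budget; otherwise allocate the object directly. A simplified skeleton of that gate (not the HotSpot code; waste_pct plays the role of ParallelGCBufferWastePct):

    #include <cstddef>
    #include <cstdio>

    enum class PlabAction { RefillPlab, AllocateDirect };

    // Mirrors the gate at the top of allocate_direct_or_new_plab: both
    // conditions must hold to justify retiring the current PLAB.
    static PlabAction choose(size_t required_in_plab,
                             size_t plab_word_size,
                             size_t waste_pct) {
      bool fits          = required_in_plab <= plab_word_size;
      bool cheap_to_toss = required_in_plab * 100 < plab_word_size * waste_pct;
      return (fits && cheap_to_toss) ? PlabAction::RefillPlab
                                     : PlabAction::AllocateDirect;
    }

    int main() {
      // With 1024-word PLABs and a 10% budget, a 100-word request triggers
      // a refill while a 2000-word request goes direct (it cannot fit).
      std::printf("%d %d\n",
                  choose(100, 1024, 10) == PlabAction::RefillPlab,
                  choose(2000, 1024, 10) == PlabAction::AllocateDirect);
      return 0;
    }
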
 293 
 294 void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
 295   alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
 296 }