
src/hotspot/share/gc/shared/collectedHeap.cpp

erik allocation

347 
348 #ifdef ASSERT                                                                                                                        
349 void CollectedHeap::check_for_valid_allocation_state() {                                                                             
350   Thread *thread = Thread::current();                                                                                                
351   // How to choose between a pending exception and a potential                                                                       
352   // OutOfMemoryError?  Don't allow pending exceptions.                                                                              
353   // This is a VM policy failure, so how do we exhaustively test it?                                                                 
354   assert(!thread->has_pending_exception(),                                                                                           
355          "shouldn't be allocating with pending exception");                                                                          
356   if (StrictSafepointChecks) {                                                                                                       
357     assert(thread->allow_allocation(),                                                                                               
358            "Allocation done by thread for which allocation is blocked "                                                              
359            "by No_Allocation_Verifier!");                                                                                            
360     // Allocation of an oop can always invoke a safepoint,                                                                           
361     // hence, the true argument                                                                                                      
362     thread->check_for_valid_safepoint_state(true);                                                                                   
363   }                                                                                                                                  
364 }                                                                                                                                    
365 #endif                                                                                                                               
366 
367 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {                                        
368 
369   // Retain tlab and allocate object in shared space if                                                                              
370   // the amount free in the tlab is too large to discard.                                                                            
371   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {                                                                 
372     thread->tlab().record_slow_allocation(size);                                                                                     
373     return NULL;                                                                                                                     
374   }                                                                                                                                  
375 
376   // Discard tlab and allocate a new one.                                                                                            
377   // To minimize fragmentation, the last TLAB may be smaller than the rest.                                                          
378   size_t new_tlab_size = thread->tlab().compute_size(size);                                                                          
379 
380   thread->tlab().clear_before_allocation();                                                                                          
381 
382   if (new_tlab_size == 0) {                                                                                                          
383     return NULL;                                                                                                                     
384   }                                                                                                                                  
385 
386   // Allocate a new TLAB requesting new_tlab_size. Any size                                                                          
387   // between minimal and new_tlab_size is accepted.                                                                                  
388   size_t actual_tlab_size = 0;                                                                                                       
389   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);                                                             
390   HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);                              
391   if (obj == NULL) {                                                                                                                 
392     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
393            min_tlab_size, new_tlab_size, actual_tlab_size);                                                                          
394     return NULL;                                                                                                                     
395   }                                                                                                                                  
396   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
397          p2i(obj), min_tlab_size, new_tlab_size);                                                                                    
398 
399   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);                
400 
401   if (ZeroTLAB) {                                                                                                                    
402     // ..and clear it.                                                                                                               
403     Copy::zero_to_words(obj, actual_tlab_size);                                                                                      
404   } else {                                                                                                                           
405     // ...and zap just allocated object.                                                                                             
406 #ifdef ASSERT                                                                                                                        
407     // Skip mangling the space corresponding to the object header to                                                                 
408     // ensure that the returned space is not considered parsable by                                                                  
409     // any concurrent GC thread.                                                                                                     
410     size_t hdr_size = oopDesc::header_size();                                                                                        
411     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);                                                
412 #endif // ASSERT                                                                                                                     
413   }                                                                                                                                  
414   thread->tlab().fill(obj, obj + size, actual_tlab_size);                                                                            
415   return obj;                                                                                                                        
416 }                                                                                                                                    
417 
418 size_t CollectedHeap::max_tlab_size() const {                                                                                        
419   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
420   // This restriction could be removed by enabling filling with multiple arrays.                                                     
421   // If we compute that the reasonable way as                                                                                        
422   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)                                                                     
423   // we'll overflow on the multiply, so we do the divide first.                                                                      
424   // We actually lose a little by dividing first,                                                                                    
425   // but that just makes the TLAB  somewhat smaller than the biggest array,                                                          
426   // which is fine, since we'll be able to fill that.                                                                                
427   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +                                                                       
428               sizeof(jint) *                                                                                                         
429               ((juint) max_jint / (size_t) HeapWordSize);                                                                            
430   return align_down(max_int_size, MinObjAlignment);                                                                                  
431 }                                                                                                                                    
432 
433 size_t CollectedHeap::filler_array_hdr_size() {                                                                                      

347 
348 #ifdef ASSERT
349 void CollectedHeap::check_for_valid_allocation_state() {
350   Thread *thread = Thread::current();
351   // How to choose between a pending exception and a potential
352   // OutOfMemoryError?  Don't allow pending exceptions.
353   // This is a VM policy failure, so how do we exhaustively test it?
354   assert(!thread->has_pending_exception(),
355          "shouldn't be allocating with pending exception");
356   if (StrictSafepointChecks) {
357     assert(thread->allow_allocation(),
358            "Allocation done by thread for which allocation is blocked "
359            "by No_Allocation_Verifier!");
360     // Allocation of an oop can always invoke a safepoint,
361     // hence, the true argument
362     thread->check_for_valid_safepoint_state(true);
363   }
364 }
365 #endif
366 
367 HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
368                                           bool* gc_overhead_limit_was_exceeded, TRAPS) {
369   if (UseTLAB) {
370     HeapWord* result = allocate_from_tlab(klass, size, THREAD);
371     if (result != NULL) {
372       return result;
373     }
374   }
375   return Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
376 }
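
obj_allocate_raw routes every raw allocation through the same two-level pattern: try the thread-local buffer first, and only fall back to the shared heap (mem_allocate) when that fails. The standalone sketch below models just that routing with a trivial bump-pointer buffer; LocalBuffer, SharedHeap and the sizes used are made-up illustration names, not HotSpot types, and the model ignores TLAB refilling (that is the job of allocate_from_tlab_slow below).

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical thread-local bump-pointer buffer (a stand-in for a TLAB).
struct LocalBuffer {
  char* top;
  char* end;
  void* allocate(size_t bytes) {
    if ((size_t)(end - top) < bytes) return nullptr; // fast path fails
    void* p = top;
    top += bytes;
    return p;
  }
};

// Hypothetical shared heap (a stand-in for mem_allocate on the shared space).
struct SharedHeap {
  void* mem_allocate(size_t bytes) { return malloc(bytes); }
};

// Mirrors the routing in obj_allocate_raw: local buffer first, shared heap second.
void* allocate_raw(LocalBuffer& local, SharedHeap& shared, size_t bytes) {
  if (void* p = local.allocate(bytes)) {
    return p;                          // TLAB-style fast path
  }
  return shared.mem_allocate(bytes);   // slow path in the shared space
}

int main() {
  char storage[64];
  LocalBuffer local = { storage, storage + sizeof(storage) };
  SharedHeap shared;

  void* a = allocate_raw(local, shared, 48); // fits in the local buffer
  void* b = allocate_raw(local, shared, 48); // does not fit, goes to the shared heap
  printf("a = %p (local), b = %p (shared)\n", a, b);
  free(b);
  return 0;
}
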
377 
378 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
379   ThreadLocalAllocBuffer& tlab = THREAD->tlab();
380 
381   // Retain tlab and allocate object in shared space if
382   // the amount free in the tlab is too large to discard.
383   if (tlab.free() > tlab.refill_waste_limit()) {
384     tlab.record_slow_allocation(size);
385     return NULL;
386   }
387 
388   // Discard tlab and allocate a new one.
389   // To minimize fragmentation, the last TLAB may be smaller than the rest.
390   size_t new_tlab_size = tlab.compute_size(size);
391 
392   tlab.clear_before_allocation();
393 
394   if (new_tlab_size == 0) {
395     return NULL;
396   }
397 
398   // Allocate a new TLAB requesting new_tlab_size. Any size
399   // between minimal and new_tlab_size is accepted.
400   size_t actual_tlab_size = 0;
401   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
402   HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
403   if (obj == NULL) {
404     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
405            min_tlab_size, new_tlab_size, actual_tlab_size);
406     return NULL;
407   }
408   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
409          p2i(obj), min_tlab_size, new_tlab_size);
410 
411   AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
412 
413   if (ZeroTLAB) {
414     // ..and clear it.
415     Copy::zero_to_words(obj, actual_tlab_size);
416   } else {
417     // ...and zap just allocated object.
418 #ifdef ASSERT
419     // Skip mangling the space corresponding to the object header to
420     // ensure that the returned space is not considered parsable by
421     // any concurrent GC thread.
422     size_t hdr_size = oopDesc::header_size();
423     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
424 #endif // ASSERT
425   }
426   tlab.fill(obj, obj + size, actual_tlab_size);
427   return obj;
428 }
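
The retain-versus-discard decision at the top of allocate_from_tlab_slow can be illustrated in isolation: a TLAB that still has more free space than its refill waste limit is kept (and the request is satisfied from the shared space instead), otherwise it is retired and refilled. The self-contained model below only mirrors that heuristic; TlabModel and the word counts are hypothetical, not HotSpot types.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the relevant ThreadLocalAllocBuffer state.
struct TlabModel {
  size_t free_words;               // words still unused in the current TLAB
  size_t refill_waste_limit_words; // max words we are willing to throw away
};

// Mirrors the slow-path decision: true means "keep the TLAB and allocate this
// object in the shared space", false means "retire the TLAB and refill it".
bool keep_tlab_and_allocate_outside(const TlabModel& tlab) {
  return tlab.free_words > tlab.refill_waste_limit_words;
}

int main() {
  TlabModel nearly_full  = { 16, 64 };   // little space left: cheap to discard
  TlabModel mostly_empty = { 4096, 64 }; // lots left: too wasteful to discard

  printf("nearly full  -> %s\n", keep_tlab_and_allocate_outside(nearly_full)
                                     ? "keep, allocate outside" : "discard and refill");
  printf("mostly empty -> %s\n", keep_tlab_and_allocate_outside(mostly_empty)
                                     ? "keep, allocate outside" : "discard and refill");
  return 0;
}

In the real code the limit is adaptive: record_slow_allocation nudges the refill waste limit upward, so a TLAB that repeatedly takes this path eventually becomes eligible for retirement.
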
429 
430 size_t CollectedHeap::max_tlab_size() const {
431   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
432   // This restriction could be removed by enabling filling with multiple arrays.
433   // If we compute that the reasonable way as
434   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
435   // we'll overflow on the multiply, so we do the divide first.
436   // We actually lose a little by dividing first,
437   // but that just makes the TLAB  somewhat smaller than the biggest array,
438   // which is fine, since we'll be able to fill that.
439   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
440               sizeof(jint) *
441               ((juint) max_jint / (size_t) HeapWordSize);
442   return align_down(max_int_size, MinObjAlignment);
443 }
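
The divide-before-multiply comment in max_tlab_size() is easy to verify with a small standalone program. The snippet below uses uint32_t as a stand-in for a 32-bit size_t/juint and assumes a heap word size of 8 bytes; it is only an illustration of the arithmetic, not HotSpot code.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint       = 2147483647u; // 2^31 - 1, Integer.MAX_VALUE
  const uint32_t heap_word_size = 8;           // assumed word size in bytes
  const uint32_t jint_size      = 4;           // sizeof(jint)

  // "Reasonable" order: multiply first. In 32-bit unsigned arithmetic this
  // wraps around, because 4 * (2^31 - 1) does not fit in 32 bits.
  uint32_t multiply_first = (jint_size * max_jint) / heap_word_size;

  // Order used by max_tlab_size(): divide first, so the intermediate value
  // stays well inside 32 bits.
  uint32_t divide_first = jint_size * (max_jint / heap_word_size);

  // Exact value computed in 64 bits, for comparison.
  uint64_t exact = (uint64_t)jint_size * max_jint / heap_word_size;

  printf("multiply first: %u words\n", multiply_first);                     // wrapped, wrong
  printf("divide first:   %u words\n", divide_first);                       // 1073741820
  printf("exact (64-bit): %llu words\n", (unsigned long long)exact);        // 1073741823
  return 0;
}

The divide-first result (1073741820 words) is three words short of the exact value (1073741823), which is the small loss the comment accepts; the multiply-first variant silently wraps and would cap TLABs at roughly half the intended size.
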
444 
445 size_t CollectedHeap::filler_array_hdr_size() {