
src/share/vm/gc/shared/collectedHeap.inline.hpp

rev 8978 : imported patch remove_err_msg
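The patch drops the err_msg() wrapper from assert messages: the newer assert macro
takes printf-style format arguments directly, so no err_msg temporary is needed.
A minimal sketch of the before/after pattern, using a hypothetical variadic ASSERT
macro rather than HotSpot's real one:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for a variadic assert: the format string and its
    // arguments pass straight through; no err_msg() temporary is built.
    // (", ##__VA_ARGS__" is a GCC/Clang extension, used here for brevity.)
    #define ASSERT(cond, fmt, ...)                                  \
      do {                                                          \
        if (!(cond)) {                                              \
          fprintf(stderr, "assert(%s) failed: " fmt "\n",           \
                  #cond, ##__VA_ARGS__);                            \
          abort();                                                  \
        }                                                           \
      } while (0)

    void example(unsigned alignment_in_bytes) {
      // Before: assert(cond, err_msg("Alignment size %u is incorrect.", a));
      // After:  the wrapper is gone; arguments are passed directly.
      ASSERT(alignment_in_bytes % sizeof(void*) == 0,
             "Alignment size %u is incorrect.", alignment_in_bytes);
    }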

Old version (with err_msg() in the assert messages):

 227   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 228   assert(size >= 0, "int won't convert to size_t");
 229   HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
 230   ((oop)obj)->set_klass_gap(0);
 231   post_allocation_setup_array(klass, obj, length);
 232 #ifndef PRODUCT
 233   const size_t hs = oopDesc::header_size()+1;
 234   Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
 235 #endif
 236   return (oop)obj;
 237 }
 238 
 239 inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
 240                                                          HeapWord* end,
 241                                                          unsigned short alignment_in_bytes) {
 242   if (alignment_in_bytes <= ObjectAlignmentInBytes) {
 243     return addr;
 244   }
 245 
 246   assert(is_ptr_aligned(addr, HeapWordSize),
 247     err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr)));
 248   assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
 249     err_msg("Alignment size %u is incorrect.", alignment_in_bytes));
 250 
 251   HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
 252   size_t padding = pointer_delta(new_addr, addr);
 253 
 254   if (padding == 0) {
 255     return addr;
 256   }
 257 
 258   if (padding < CollectedHeap::min_fill_size()) {
 259     padding += alignment_in_bytes / HeapWordSize;
 260     assert(padding >= CollectedHeap::min_fill_size(),
 261       err_msg("alignment_in_bytes %u is expected to be larger "
 262       "than the minimum object size", alignment_in_bytes));
 263     new_addr = addr + padding;
 264   }
 265 
 266   assert(new_addr > addr, err_msg("Unexpected arithmetic overflow "
 267     PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)));
 268   if (new_addr < end) {
 269     CollectedHeap::fill_with_object(addr, padding);
 270     return new_addr;
 271   } else {
 272     return NULL;
 273   }
 274 }
 275 
 276 #ifndef PRODUCT
 277 
 278 inline bool
 279 CollectedHeap::promotion_should_fail(volatile size_t* count) {
 280   // Access to count is not atomic; the value does not have to be exact.
 281   if (PromotionFailureALot) {
 282     const size_t gc_num = total_collections();
 283     const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
 284     if (elapsed_gcs >= PromotionFailureALotInterval) {
 285       // Test for unsigned arithmetic wrap-around.
 286       if (++*count >= PromotionFailureALotCount) {
 287         *count = 0;

New version (err_msg() removed; assert() takes the format arguments directly):

 227   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 228   assert(size >= 0, "int won't convert to size_t");
 229   HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
 230   ((oop)obj)->set_klass_gap(0);
 231   post_allocation_setup_array(klass, obj, length);
 232 #ifndef PRODUCT
 233   const size_t hs = oopDesc::header_size()+1;
 234   Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
 235 #endif
 236   return (oop)obj;
 237 }
 238 
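In debug (non-PRODUCT) builds, lines 232-235 above re-scan the freshly allocated
array body and verify that no word past the header still holds the allocator's
"bad heap word" fill pattern, i.e. that initialization actually happened. A
self-contained sketch of that check, with made-up names (BAD_WORD,
check_no_bad_words) standing in for HotSpot's badHeapWordVal and
check_for_non_bad_heap_word_value():

    #include <cassert>
    #include <cstring>
    #include <cstdint>

    typedef uintptr_t HeapWord;                  // stand-in for HotSpot's heap word
    static const HeapWord BAD_WORD = 0xBAADBABE; // made-up debug fill pattern

    // Debug-only scan: every word in [start, start + count) must have been
    // overwritten since the allocator poisoned it.
    static void check_no_bad_words(const HeapWord* start, size_t count) {
      for (size_t i = 0; i < count; i++) {
        assert(start[i] != BAD_WORD && "uninitialized heap word after allocation");
      }
    }

    int main() {
      HeapWord block[8];
      for (size_t i = 0; i < 8; i++) block[i] = BAD_WORD; // allocator poisons memory
      memset(block, 0, sizeof(block));                    // object initialization
      const size_t hs = 2 + 1;  // skip the header plus length slot, as above
      check_no_bad_words(block + hs, 8 - hs);
      return 0;
    }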
 239 inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
 240                                                          HeapWord* end,
 241                                                          unsigned short alignment_in_bytes) {
 242   if (alignment_in_bytes <= ObjectAlignmentInBytes) {
 243     return addr;
 244   }
 245 
 246   assert(is_ptr_aligned(addr, HeapWordSize),
 247          "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
 248   assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
 249          "Alignment size %u is incorrect.", alignment_in_bytes);
 250 
 251   HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
 252   size_t padding = pointer_delta(new_addr, addr);
 253 
 254   if (padding == 0) {
 255     return addr;
 256   }
 257 
 258   if (padding < CollectedHeap::min_fill_size()) {
 259     padding += alignment_in_bytes / HeapWordSize;
 260     assert(padding >= CollectedHeap::min_fill_size(),
 261            "alignment_in_bytes %u is expected to be larger "
 262            "than the minimum object size", alignment_in_bytes);
 263     new_addr = addr + padding;
 264   }
 265 
 266   assert(new_addr > addr, "Unexpected arithmetic overflow "
 267          PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr));
 268   if (new_addr < end) {
 269     CollectedHeap::fill_with_object(addr, padding);
 270     return new_addr;
 271   } else {
 272     return NULL;
 273   }
 274 }
 275 
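align_allocation_or_fail() rounds addr up to the requested alignment and formats
the skipped gap as a filler object; when the gap is smaller than the minimum
filler size it deliberately overshoots by one extra alignment unit so a filler
still fits. A self-contained sketch of the padding arithmetic, assuming a
power-of-two alignment and a fixed MIN_FILL stand-in for
CollectedHeap::min_fill_size():

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t HeapWord;
    static const size_t MIN_FILL = 2;  // stand-in: smallest filler object, in words

    // Round p up to 'alignment' bytes; return NULL if the padded address
    // would run past 'end'. Mirrors the control flow above.
    HeapWord* align_or_fail(HeapWord* p, HeapWord* end, size_t alignment) {
      uintptr_t a       = (uintptr_t)p;
      uintptr_t aligned = (a + alignment - 1) & ~(uintptr_t)(alignment - 1);
      size_t padding    = (aligned - a) / sizeof(HeapWord);

      if (padding == 0) return p;          // already aligned
      if (padding < MIN_FILL) {
        // Gap too small for a filler object: skip one more alignment unit.
        padding += alignment / sizeof(HeapWord);
        assert(padding >= MIN_FILL);
      }
      HeapWord* new_p = p + padding;       // still aligned: padding grew by a
      if (new_p >= end) return NULL;       // whole unit; no room left -> fail
      // The real code calls fill_with_object(p, padding) here.
      return new_p;
    }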
 276 #ifndef PRODUCT
 277 
 278 inline bool
 279 CollectedHeap::promotion_should_fail(volatile size_t* count) {
 280   // Access to count is not atomic; the value does not have to be exact.
 281   if (PromotionFailureALot) {
 282     const size_t gc_num = total_collections();
 283     const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
 284     if (elapsed_gcs >= PromotionFailureALotInterval) {
 285       // Test for unsigned arithmetic wrap-around.
 286       if (++*count >= PromotionFailureALotCount) {
 287         *count = 0;

