
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 51304 : [mq]: 8207200-getmemoryusage-consistency


    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

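The new memory_usage() default above is the only functional change on this page: it builds the heap-level MemoryUsage from the InitialHeapSize flag and the heap's own used(), capacity() and max_capacity() accessors, giving the management layer one place to take the (initial, used, committed, max) snapshot, which is presumably the consistency that 8207200 is after. The self-contained sketch below mirrors that shape with stand-in types and a hypothetical FixedHeap subclass; none of these names are HotSpot code.

// Hedged, standalone sketch (not HotSpot code): a base class builds the
// (init, used, committed, max) tuple from its own virtual accessors, the
// way the new CollectedHeap::memory_usage() default does.
#include <cstddef>
#include <cstdio>

struct MemoryUsageSketch {            // stand-in for HotSpot's MemoryUsage value object
  size_t init_size;
  size_t used;
  size_t committed;
  size_t max_size;
};

class HeapSketch {                    // stand-in for CollectedHeap
public:
  virtual ~HeapSketch() {}
  virtual size_t used() const         = 0;
  virtual size_t capacity() const     = 0;   // committed bytes
  virtual size_t max_capacity() const = 0;   // reserved bytes

  // Default snapshot: all four values come from one place.
  virtual MemoryUsageSketch memory_usage() {
    const size_t initial_heap_size = 256u * 1024 * 1024;  // placeholder for InitialHeapSize
    MemoryUsageSketch u = { initial_heap_size, used(), capacity(), max_capacity() };
    return u;
  }
};

class FixedHeap : public HeapSketch { // hypothetical heap with fixed numbers
public:
  size_t used() const         { return  64u * 1024 * 1024; }
  size_t capacity() const     { return 128u * 1024 * 1024; }
  size_t max_capacity() const { return 512u * 1024 * 1024; }
};

int main() {
  FixedHeap heap;
  MemoryUsageSketch u = heap.memory_usage();
  std::printf("init=%zu used=%zu committed=%zu max=%zu\n",
              u.init_size, u.used, u.committed, u.max_size);
  return 0;
}
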
#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
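The divide-first arithmetic that this comment describes can be checked with a standalone sketch. The header size and word size below are illustrative stand-ins, and this is not the function's own return expression:

// Hedged sketch (not the body of max_tlab_size()): size an int[Integer.MAX_VALUE]
// in heap words by dividing before multiplying, so the intermediate value stays
// in range even when size_t is 32 bits.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t heap_word_size = 8;            // stand-in for HeapWordSize on a 64-bit VM
  const size_t jint_size      = sizeof(int32_t);
  const size_t max_jint       = 0x7fffffff;   // Integer.MAX_VALUE
  const size_t header_words   = 2;            // illustrative int-array header size in words

  // The "reasonable" order, (sizeof(jint) * max_jint) / HeapWordSize, needs a
  // 33-bit intermediate and would overflow a 32-bit size_t; dividing first
  // stays in range and only rounds the result down slightly.
  size_t divide_first = header_words + jint_size * (max_jint / heap_word_size);

  std::printf("max int[] size in words (divide-first sketch): %zu\n", divide_first);
  return 0;
}
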

