405 //
406 // * All non-TLAB allocation requests should go to mem_allocate().
407 //
408 // * If either call cannot satisfy the allocation request using the
409 // current allocating region, they will try to get a new one. If
410 // this fails, they will attempt to do an evacuation pause and
411 // retry the allocation.
412 //
413 // * If all allocation attempts fail, even after trying to schedule
414 // an evacuation pause, allocate_new_tlab() will return NULL,
415 // whereas mem_allocate() will attempt a heap expansion and/or
416 // schedule a Full GC.
417 //
418 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
419 // should never be called with word_size being humongous. All
420 // humongous allocation requests should go to mem_allocate() which
421 // will satisfy them with a special path.
422
// Allocates a new TLAB of word_size HeapWords; per the notes above it
// returns NULL if the request cannot be satisfied even after an
// evacuation pause, and must never be called with a humongous size.
423 virtual HeapWord* allocate_new_tlab(size_t word_size);
424
// Non-TLAB allocation entry point (also handles humongous requests via
// a special path — see notes above).
// NOTE(review): gc_overhead_limit_was_exceeded is an out-parameter,
// presumably set when the GC-overhead-limit policy trips — confirm
// against the definition in the .cpp file.
425 virtual HeapWord* mem_allocate(size_t word_size,
426 bool* gc_overhead_limit_was_exceeded);
427
428 // First-level mutator allocation attempt: try to allocate out of
429 // the mutator alloc region without taking the Heap_lock. This
430 // should only be used for non-humongous allocations.
431 inline HeapWord* attempt_allocation(size_t word_size);
432
433 // Second-level mutator allocation attempt: take the Heap_lock and
434 // retry the allocation attempt, potentially scheduling a GC
435 // pause. This should only be used for non-humongous allocations.
436 HeapWord* attempt_allocation_slow(size_t word_size);
437
438 // Takes the Heap_lock and attempts a humongous allocation. It can
439 // potentially schedule a GC pause.
440 HeapWord* attempt_allocation_humongous(size_t word_size);
441
442 // Allocation attempt that should be called during safepoints (e.g.,
443 // at the end of a successful GC). expect_null_mutator_alloc_region
444 // specifies whether the mutator alloc region is expected to be NULL
445 // or not.
|
405 //
406 // * All non-TLAB allocation requests should go to mem_allocate().
407 //
408 // * If either call cannot satisfy the allocation request using the
409 // current allocating region, they will try to get a new one. If
410 // this fails, they will attempt to do an evacuation pause and
411 // retry the allocation.
412 //
413 // * If all allocation attempts fail, even after trying to schedule
414 // an evacuation pause, allocate_new_tlab() will return NULL,
415 // whereas mem_allocate() will attempt a heap expansion and/or
416 // schedule a Full GC.
417 //
418 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
419 // should never be called with word_size being humongous. All
420 // humongous allocation requests should go to mem_allocate() which
421 // will satisfy them with a special path.
422
// Allocates a new TLAB of word_size HeapWords; per the notes above it
// returns NULL if the request cannot be satisfied even after an
// evacuation pause, and must never be called with a humongous size.
423 virtual HeapWord* allocate_new_tlab(size_t word_size);
424
// Non-TLAB allocation entry point (also handles humongous requests via
// a special path — see notes above).
// NOTE(review): klass and thread are new relative to the earlier
// two-argument form of this declaration; they presumably identify the
// class being instantiated and the allocating thread — confirm against
// the overridden base-class declaration and the .cpp definition.
// NOTE(review): gc_overhead_limit_was_exceeded is an out-parameter,
// presumably set when the GC-overhead-limit policy trips — confirm.
425 virtual HeapWord* mem_allocate(size_t word_size, Klass* klass, Thread* thread,
426 bool* gc_overhead_limit_was_exceeded);
427
428 // First-level mutator allocation attempt: try to allocate out of
429 // the mutator alloc region without taking the Heap_lock. This
430 // should only be used for non-humongous allocations.
431 inline HeapWord* attempt_allocation(size_t word_size);
432
433 // Second-level mutator allocation attempt: take the Heap_lock and
434 // retry the allocation attempt, potentially scheduling a GC
435 // pause. This should only be used for non-humongous allocations.
436 HeapWord* attempt_allocation_slow(size_t word_size);
437
438 // Takes the Heap_lock and attempts a humongous allocation. It can
439 // potentially schedule a GC pause.
440 HeapWord* attempt_allocation_humongous(size_t word_size);
441
442 // Allocation attempt that should be called during safepoints (e.g.,
443 // at the end of a successful GC). expect_null_mutator_alloc_region
444 // specifies whether the mutator alloc region is expected to be NULL
445 // or not.
|