403 // * All allocation requests for new TLABs should go to
404 // allocate_new_tlab().
405 //
406 // * All non-TLAB allocation requests should go to mem_allocate().
407 //
408 // * If either call cannot satisfy the allocation request using the
409 // current allocating region, they will try to get a new one. If
410 // this fails, they will attempt to do an evacuation pause and
411 // retry the allocation.
412 //
413 // * If all allocation attempts fail, even after trying to schedule
414 // an evacuation pause, allocate_new_tlab() will return NULL,
415 // whereas mem_allocate() will attempt a heap expansion and/or
416 // schedule a Full GC.
417 //
418 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
419 // should never be called with a humongous desired_word_size. All
420 // humongous allocation requests should go to mem_allocate() which
421 // will satisfy them with a special path.
422
423 virtual HeapWord* allocate_new_tlab(size_t min_word_size,
424 size_t desired_word_size,
425 size_t* actual_word_size);
426
427 virtual HeapWord* mem_allocate(size_t word_size,
428 bool* gc_overhead_limit_was_exceeded);
429
430 // First-level mutator allocation attempt: try to allocate out of
431 // the mutator alloc region without taking the Heap_lock. This
432 // should only be used for non-humongous allocations.
433 inline HeapWord* attempt_allocation(size_t min_word_size,
434 size_t desired_word_size,
435 size_t* actual_word_size);
436
437 // Second-level mutator allocation attempt: take the Heap_lock and
438 // retry the allocation attempt, potentially scheduling a GC
439 // pause. This should only be used for non-humongous allocations.
440 HeapWord* attempt_allocation_slow(size_t word_size);
441
442 // Takes the Heap_lock and attempts a humongous allocation. It can
443 // potentially schedule a GC pause.
444 HeapWord* attempt_allocation_humongous(size_t word_size);
445
|
403 // * All allocation requests for new TLABs should go to
404 // allocate_new_tlab().
405 //
406 // * All non-TLAB allocation requests should go to mem_allocate().
407 //
408 // * If either call cannot satisfy the allocation request using the
409 // current allocating region, they will try to get a new one. If
410 // this fails, they will attempt to do an evacuation pause and
411 // retry the allocation.
412 //
413 // * If all allocation attempts fail, even after trying to schedule
414 // an evacuation pause, allocate_new_tlab() will return NULL,
415 // whereas mem_allocate() will attempt a heap expansion and/or
416 // schedule a Full GC.
417 //
418 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
419 // should never be called with a humongous requested_size. All
420 // humongous allocation requests should go to mem_allocate() which
421 // will satisfy them with a special path.
422
423 virtual HeapWord* allocate_new_tlab(size_t min_size,
424 size_t requested_size,
425 size_t* actual_size);
426
427 virtual HeapWord* mem_allocate(size_t word_size,
428 bool* gc_overhead_limit_was_exceeded);
429
430 // First-level mutator allocation attempt: try to allocate out of
431 // the mutator alloc region without taking the Heap_lock. This
432 // should only be used for non-humongous allocations.
433 inline HeapWord* attempt_allocation(size_t min_word_size,
434 size_t desired_word_size,
435 size_t* actual_word_size);
436
437 // Second-level mutator allocation attempt: take the Heap_lock and
438 // retry the allocation attempt, potentially scheduling a GC
439 // pause. This should only be used for non-humongous allocations.
440 HeapWord* attempt_allocation_slow(size_t word_size);
441
442 // Takes the Heap_lock and attempts a humongous allocation. It can
443 // potentially schedule a GC pause.
444 HeapWord* attempt_allocation_humongous(size_t word_size);
445
|