virtual HeapWord* allocate_new_tlab(size_t word_size);

virtual HeapWord* mem_allocate(size_t word_size,
                               bool* gc_overhead_limit_was_exceeded);

// The following three methods take a gc_count_before_ret
// parameter, which is used to return the GC count if the method
// returns NULL. Given that we are required to read the GC count
// while holding the Heap_lock, and that these paths will take the
// Heap_lock at some point anyway, it is easier to have them read the
// GC count while holding the Heap_lock before they return NULL than
// to make the caller (namely: mem_allocate()) take the Heap_lock
// again just to read the GC count.

// First-level mutator allocation attempt: try to allocate out of
// the mutator alloc region without taking the Heap_lock. This
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t word_size,
                                    uint* gc_count_before_ret,
                                    uint* gclocker_retry_count_ret,
                                    uint* gc_attempt);

// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
                                  AllocationContext_t context,
                                  uint* gc_count_before_ret,
                                  uint* gclocker_retry_count_ret,
                                  uint* gc_attempt);

// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
HeapWord* attempt_allocation_humongous(size_t word_size,
                                       uint* gc_count_before_ret,
                                       uint* gclocker_retry_count_ret,
                                       uint* gc_attempt);
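
// For illustration only (not a declaration of this class): a mutator
// allocation path such as mem_allocate() is expected to drive the
// three attempts above roughly as follows. This is a hypothetical
// sketch; the local names and retry policy are illustrative.
//
//   uint gc_count_before = 0;
//   uint gclocker_retry_count = 0;
//   uint gc_attempt = 1;
//   HeapWord* result = attempt_allocation(word_size,
//                                         &gc_count_before,
//                                         &gclocker_retry_count,
//                                         &gc_attempt);
//   if (result == NULL) {
//     // attempt_allocation() read the GC count under the Heap_lock
//     // before returning NULL, so gc_count_before can now be used to
//     // schedule an evacuation pause (see do_collection_pause()).
//   }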

// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                          AllocationContext_t context,
                                          bool expect_null_mutator_alloc_region);
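
// For illustration: at the end of a successful GC (i.e., already at a
// safepoint) a retry might be issued as below; the inline bool comment
// follows the usual HotSpot idiom. Hypothetical sketch only.
//
//   HeapWord* result =
//     attempt_allocation_at_safepoint(word_size, context,
//                                     false /* expect_null_mutator_alloc_region */);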

// Dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
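
// Illustrative sketch of "dirtying the covering cards": byte_map_base,
// card_shift and dirty_card_val() follow the usual HotSpot card table
// layout, and the real implementation may tag the cards with a
// dedicated young-gen value rather than plain dirty.
//
//   jbyte* first_card = byte_map_base + ((uintptr_t)start >> card_shift);
//   jbyte* last_card  = byte_map_base +
//                       ((uintptr_t)(start + word_size - 1) >> card_shift);
//   for (jbyte* card = first_card; card <= last_card; card++) {
//     *card = dirty_card_val();  // post write barrier will filter these
//   }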

// Allocate blocks during garbage collection. Will ensure that an
// allocation region is available, either by picking one or expanding
// the heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
inline HeapWord* par_allocate_during_gc(InCSetState dest,
                                        size_t word_size,
                                        AllocationContext_t context);

// ...

#if TASKQUEUE_STATS
static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

// Schedule the VM operation that will do an evacuation pause to
// satisfy an allocation request of word_size. *succeeded will
// return whether the VM operation was successful (it did do an
// evacuation pause) or not (another thread beat us to it or the GC
// locker was active). Given that we should not be holding the
// Heap_lock when we enter this method, we will pass the
// gc_count_before (i.e., total_collections()) as a parameter since
// it has to be read while holding the Heap_lock. Currently, both
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
HeapWord* do_collection_pause(size_t word_size,
                              uint gc_count_before,
                              bool* succeeded,
                              GCCause::Cause gc_cause,
                              uint gc_attempt);
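
// Caller-side protocol, for illustration (a hypothetical sketch; the
// GC cause and the control flow of the real callers are not shown):
//
//   uint gc_count_before;
//   {
//     MutexLockerEx ml(Heap_lock);
//     gc_count_before = total_collections(); // must be read under Heap_lock
//   }                                        // Heap_lock released here
//   bool succeeded;
//   HeapWord* result = do_collection_pause(word_size, gc_count_before,
//                                          &succeeded,
//                                          GCCause::_g1_inc_collection_pause,
//                                          gc_attempt);
//   // If *succeeded is false, another thread beat us to the pause or
//   // the GC locker was active; the caller typically retries.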

// The guts of the incremental collection pause, executed by the VM
// thread. It returns false if it is unable to do the collection due
// to the GC locker being active, true otherwise.
bool do_collection_pause_at_safepoint(double target_pause_time_ms);

// Actually do the work of evacuating the collection set.
void evacuate_collection_set(EvacuationInfo& evacuation_info);

// The G1 remembered set of the heap.
G1RemSet* _g1_rem_set;

// A set of cards that cover the objects for which the RSets should
// be updated concurrently after the collection.
DirtyCardQueueSet _dirty_card_queue_set;

// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;

// A DirtyCardQueueSet that is used to hold cards that contain