                             int* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // Dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
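
  // For intuition, a simplified sketch of the post-barrier filter that this
  // pre-dirtying short-circuits (illustrative only; the helper names below
  // are assumptions, not the exact HotSpot barrier sequence):
  //
  //   jbyte* card = card_for(field_addr);     // hypothetical card lookup
  //   if (*card != dirty_card_val) {          // skip if already dirty
  //     *card = dirty_card_val;
  //     enqueue_for_refinement(card);         // hypothetical enqueue
  //   }
  //
  // Since every card covering the young block is dirtied up front, stores
  // into objects on the block take the cheap "already dirty" path and
  // never enqueue a card.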

  // Allocates a block during garbage collection. Ensures that an
  // allocation region is available, picking a new one or expanding the
  // heap if necessary, and then allocates a block of the given size.
  // The block may not be humongous - it must fit into a single heap region.
  inline HeapWord* par_allocate_during_gc(InCSetState dest,
                                          size_t word_size,
                                          AllocationContext_t context);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);
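
  // One way this can be done (an illustrative sketch only, not necessarily
  // the actual implementation): repeatedly claim whatever space remains via
  // the region's CAS-based par_allocate() and format the claimed space as
  // filler, so racing allocators only ever observe a full region:
  //
  //   size_t remaining;
  //   while ((remaining = r->free() / HeapWordSize) > 0) {
  //     HeapWord* dummy = r->par_allocate(remaining);
  //     if (dummy != NULL) {
  //       // fill [dummy, dummy + remaining) with a dead filler object
  //     }
  //   }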

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);
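
  // Taken together, par_allocate_during_gc() is essentially a dispatch on
  // the destination state to one of the two attempt methods above (sketch;
  // assumes InCSetState exposes Young/Old tags in this form):
  //
  //   switch (dest.value()) {
  //     case InCSetState::Young: return survivor_attempt_allocation(word_size, context);
  //     case InCSetState::Old:   return old_attempt_allocation(word_size, context);
  //     default:                 ShouldNotReachHere(); return NULL;
  //   }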

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  InCSetState dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, InCSetState dest);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);
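
  // Typical call shape on a failed-allocation path (surrounding locals are
  // illustrative):
  //
  //   bool succeeded = do_collection(false /* explicit_gc */,
  //                                  clear_all_soft_refs,
  //                                  word_size);
  //   if (!succeeded) {
  //     // The GC locker was active: the caller must stall and retry the
  //     // allocation once the locker is released.
  //   }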

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.

  // ...

  void enqueue_discovered_references(uint no_of_gc_workers);

public:

  G1Allocator* allocator() {
    return _allocator;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);
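
  // Worked example of the rounding contract (a region size of 1M is assumed
  // purely for illustration): expand(1536 * K) is rounded up to 2M, i.e.
  // two whole regions. If both regions are committed, the method returns
  // true (at least the requested 1536K was added); if only one region can
  // be committed, it returns false, since the heap grew by less than the
  // request.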

  // Returns the PLAB statistics for a given destination.
  inline PLABStats* alloc_buffer_stats(InCSetState dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(InCSetState dest);
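
  // The intended relationship between these two (a sketch; the accessor and
  // clamping shown are assumptions, details may differ):
  //
  //   size_t sz = alloc_buffer_stats(dest)->desired_plab_sz();
  //   return MIN2(max_plab_word_size, sz);  // keep PLABs non-humongous
  //
  // i.e. the per-destination PLABStats adapts the size from recent
  // allocation history, and the result is capped so a PLAB always fits in
  // a single region.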

  inline AllocationContextStats& allocation_context_stats();

  // Do anything common to GCs.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }
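
  // Sketch of the intended use from the evacuation/RS-scanning closures
  // (the caller shape is illustrative): when a reference to a humongous
  // candidate object is found during a pause, mark it live so that eager
  // reclaim leaves its region alone:
  //
  //   if (in_cset_state(obj).is_humongous()) {
  //     set_humongous_is_live(obj);
  //   }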

  // Returns whether the given region (which must be a humongous (start)
  // region) is to be considered conservatively live regardless of any
  // other conditions.
  bool humongous_region_is_always_live(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.

  // ...

  // extensive version.
  bool is_in_exact(const void* p) const;
#endif

  // Return "TRUE" iff the given object address is within the collection
  // set. Slow implementation.
  inline bool obj_in_cs(oop obj);

  inline bool is_in_cset(oop obj);

  inline bool is_in_cset_or_humongous(const oop obj);

private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether
  // the corresponding region is in the collection set or not.
  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
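
  // Conceptually, the fast test is a single region-granularity array lookup
  // biased by the heap's base address (sketch; the accessor name is an
  // assumption):
  //
  //   InCSetState st = _in_cset_fast_test.get_by_address((HeapWord*) obj);
  //   // st distinguishes not-in-cset, young/old in-cset, and humongous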

public:

  inline InCSetState in_cset_state(const oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrm.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() const {
    return _hrm.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
  }

  // This resets the card table to all zeros. It is used after