--- old
+++ new
@@ -48,102 +48,97 @@
 }
 
 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
   size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
   // Prevent humongous PLAB sizes for two reasons:
   // * PLABs are allocated using similar paths to oops, but should
   //   never be in a humongous region
   // * Allowing humongous PLABs needlessly churns the region free lists
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
 // Inline functions for G1CollectedHeap
 
 inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
   return _allocation_context_stats;
 }
 
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
+inline HeapRegion* G1CollectedHeap::next_region_by_index(HeapRegion* hr) const {
+  return _hrm.next_region_by_index(hr);
+}
+
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
          p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
   return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
   return _hrm.reserved().start() + index * HeapRegion::GrainWords;
 }
 
 template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   assert(addr != NULL, "invariant");
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = heap_region_containing_raw(addr);
-  if (hr->is_continues_humongous()) {
-    return hr->humongous_start_region();
-  }
-  return hr;
-}
-
 inline void G1CollectedHeap::reset_gc_time_stamp() {
   _gc_time_stamp = 0;
   OrderAccess::fence();
   // Clear the cached CSet starting regions and time stamps.
   // Their validity is dependent on the GC timestamp.
   clear_cset_start_regions();
 }
 
 inline void G1CollectedHeap::increment_gc_time_stamp() {
   ++_gc_time_stamp;
   OrderAccess::fence();
 }
 
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
   _old_set.add(hr);
 }
 
 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
   _old_set.remove(hr);
 }
 
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
 // belongs to a young region.
 inline void
 G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   assert_heap_not_locked();
 
   // Assign the containing region to containing_hr so that we don't
-  // have to keep calling heap_region_containing_raw() in the
+  // have to keep calling heap_region_containing() in the
   // asserts below.
-  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
   assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
   assert(!containing_hr->is_humongous(), "it should not be humongous");
 
   HeapWord* end = start + word_size;
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
 
   MemRegion mr(start, end);
   g1_barrier_set()->g1_mark_as_young(mr);
 }
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
   return _task_queues->queue(i);
 }
 
 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
   return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
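Note on the semantic change above (an illustrative sketch, not part of the patch): the old heap_region_containing() mapped an address inside a "continues humongous" region to the humongous start region, whereas the new one returns the region that directly contains the address, as heap_region_containing_raw() used to. A caller that still needs the start region would have to resolve it explicitly. Assuming HeapRegion keeps its is_continues_humongous() and humongous_start_region() accessors (both only appear on the old side of this diff), and with addr standing in for any heap address of interest, that could look like:

  HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(addr);
  if (hr->is_continues_humongous()) {
    // addr lies in a continuation region of a humongous object; step back to
    // the region in which the object starts (previously done implicitly).
    hr = hr->humongous_start_region();
  }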