src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
rev 6166 : 8038498: Fix includes and C inlining after 8035330
*** 704,726 ****
}
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
! bool in_cset_fast_test(oop obj) {
! assert(_in_cset_fast_test != NULL, "sanity");
! assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
! // no need to subtract the bottom of the heap from obj,
! // _in_cset_fast_test is biased
! uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
! bool ret = _in_cset_fast_test[index];
! // let's make sure the result is consistent with what the slower
! // test returns
! assert( ret || !obj_in_cs(obj), "sanity");
! assert(!ret || obj_in_cs(obj), "sanity");
! return ret;
! }
void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false,
(size_t) _in_cset_fast_test_length * sizeof(bool));
--- 704,714 ----
}
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
! inline bool in_cset_fast_test(oop obj);
void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false,
(size_t) _in_cset_fast_test_length * sizeof(bool));
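Note on the hunk above: the removed body does not go away; the change turns the in-class definition into a declaration, and the definition presumably moves to g1CollectedHeap.inline.hpp (destination file name assumed here, consistent with the bug title's inlining cleanup; it is not shown in this webrev). A sketch of the out-of-class definition, reusing the removed body verbatim:

    // Sketch of the moved definition in g1CollectedHeap.inline.hpp (placement assumed).
    inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
      assert(_in_cset_fast_test != NULL, "sanity");
      assert(_g1_committed.contains((HeapWord*) obj),
             err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
      // No need to subtract the bottom of the heap from obj;
      // _in_cset_fast_test is biased.
      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
      bool ret = _in_cset_fast_test[index];
      // Check that the result is consistent with the slower test.
      assert( ret || !obj_in_cs(obj), "sanity");
      assert(!ret || obj_in_cs(obj), "sanity");
      return ret;
    }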
*** 1248,1260 ****
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
append_secondary_free_list();
}
}
! void old_set_remove(HeapRegion* hr) {
! _old_set.remove(hr);
! }
size_t non_young_capacity_bytes() {
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
}
--- 1236,1246 ----
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
append_secondary_free_list();
}
}
! inline void old_set_remove(HeapRegion* hr);
size_t non_young_capacity_bytes() {
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
}
*** 1341,1351 ****
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
! HeapRegion* region_at(uint index) const { return _hrs.at(index); }
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
// in parallel by ParallelGCThreads worker threads with distinct worker
--- 1327,1337 ----
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
! inline HeapRegion* region_at(uint index) const;
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
// in parallel by ParallelGCThreads worker threads with distinct worker
*** 1470,1483 ****
virtual bool card_mark_must_follow_store() const {
return true;
}
! bool is_in_young(const oop obj) {
! HeapRegion* hr = heap_region_containing(obj);
! return hr != NULL && hr->is_young();
! }
#ifdef ASSERT
virtual bool is_in_partial_collection(const void* p);
#endif
--- 1456,1466 ----
virtual bool card_mark_must_follow_store() const {
return true;
}
! inline bool is_in_young(const oop obj);
#ifdef ASSERT
virtual bool is_in_partial_collection(const void* p);
#endif
*** 1486,1498 ****
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
! virtual bool can_elide_initializing_store_barrier(oop new_obj) {
! return is_in_young(new_obj);
! }
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs
// are capped at the humongous threshold and we want to
--- 1469,1479 ----
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
! virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs
// are capped at the humongous threshold and we want to
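The two hunks above follow the same pattern: is_in_young() and can_elide_initializing_store_barrier() keep their behavior (the barrier can be elided exactly when the newly allocated object is in the young gen), only the bodies move out of the class declaration. A sketch of the moved definitions, again assuming they land in g1CollectedHeap.inline.hpp (note that "virtual" stays on the in-class declaration only; it is not repeated at an out-of-class definition):

    // Sketch of the moved definitions (placement in g1CollectedHeap.inline.hpp assumed).
    inline bool G1CollectedHeap::is_in_young(const oop obj) {
      HeapRegion* hr = heap_region_containing(obj);
      return hr != NULL && hr->is_young();
    }

    inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
      return is_in_young(new_obj);
    }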
*** 1582,1608 ****
// This will find the region to which the object belongs and
// then call the region version of the same function.
// Added if it is NULL it isn't dead.
! bool is_obj_dead(const oop obj) const {
! const HeapRegion* hr = heap_region_containing(obj);
! if (hr == NULL) {
! if (obj == NULL) return false;
! else return true;
! }
! else return is_obj_dead(obj, hr);
! }
!
! bool is_obj_ill(const oop obj) const {
! const HeapRegion* hr = heap_region_containing(obj);
! if (hr == NULL) {
! if (obj == NULL) return false;
! else return true;
! }
! else return is_obj_ill(obj, hr);
! }
bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
bool is_marked(oop obj, VerifyOption vo);
const char* top_at_mark_start_str(VerifyOption vo);
--- 1563,1575 ----
// This will find the region to which the object belongs and
// then call the region version of the same function.
// Added if it is NULL it isn't dead.
! inline bool is_obj_dead(const oop obj) const;
!
! inline bool is_obj_ill(const oop obj) const;
bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
bool is_marked(oop obj, VerifyOption vo);
const char* top_at_mark_start_str(VerifyOption vo);
*** 1692,1721 ****
// parameter. The values for that parameter, and their meanings,
// are the same as those above.
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
! const VerifyOption vo) const {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
! case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
! case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
! default: ShouldNotReachHere();
! }
! return false; // keep some compilers happy
! }
bool is_obj_dead_cond(const oop obj,
! const VerifyOption vo) const {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
! case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
! case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
! default: ShouldNotReachHere();
! }
! return false; // keep some compilers happy
! }
// Printing
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
--- 1659,1672 ----
// parameter. The values for that parameter, and their meanings,
// are the same as those above.
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
! const VerifyOption vo) const;
bool is_obj_dead_cond(const oop obj,
! const VerifyOption vo) const;
// Printing
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
*** 1805,1819 ****
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
! template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
! if (!from->is_survivor()) {
! _g1_rem->par_write_ref(from, p, tid);
! }
! }
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points to the same region or
// is the to-space, we don't need to include it in the Rset updates.
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
--- 1756,1766 ----
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
! template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points to the same region or
// is the to-space, we don't need to include it in the Rset updates.
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
*** 1851,1867 ****
template <class T> void push_on_queue(T* ref) {
assert(verify_ref(ref), "sanity");
refs()->push(ref);
}
! template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
! if (G1DeferredRSUpdate) {
! deferred_rs_update(from, p, tid);
! } else {
! immediate_rs_update(from, p, tid);
! }
! }
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
--- 1798,1808 ----
template <class T> void push_on_queue(T* ref) {
assert(verify_ref(ref), "sanity");
refs()->push(ref);
}
! template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
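Because immediate_rs_update() and update_rs() are member templates, their definitions must remain visible to every instantiating caller even after they leave the class body, which is why they also go to the .inline.hpp rather than a .cpp. A sketch of the moved template definitions, assuming (not shown in this webrev) that the enclosing class is G1ParScanThreadState as in contemporary HotSpot sources:

    // Sketch; class name G1ParScanThreadState and file placement are assumptions.
    template <class T>
    inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
      if (!from->is_survivor()) {
        _g1_rem->par_write_ref(from, p, tid);
      }
    }

    template <class T>
    inline void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
      if (G1DeferredRSUpdate) {
        deferred_rs_update(from, p, tid);
      } else {
        immediate_rs_update(from, p, tid);
      }
    }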
*** 1981,2038 ****
inline oop clear_partial_array_mask(oop* ref) const {
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
! void do_oop_partial_array(oop* p) {
! assert(has_partial_array_mask(p), "invariant");
! oop from_obj = clear_partial_array_mask(p);
!
! assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
! assert(from_obj->is_objArray(), "must be obj array");
! objArrayOop from_obj_array = objArrayOop(from_obj);
! // The from-space object contains the real length.
! int length = from_obj_array->length();
!
! assert(from_obj->is_forwarded(), "must be forwarded");
! oop to_obj = from_obj->forwardee();
! assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
! objArrayOop to_obj_array = objArrayOop(to_obj);
! // We keep track of the next start index in the length field of the
! // to-space object.
! int next_index = to_obj_array->length();
! assert(0 <= next_index && next_index < length,
! err_msg("invariant, next index: %d, length: %d", next_index, length));
!
! int start = next_index;
! int end = length;
! int remainder = end - start;
! // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
! if (remainder > 2 * ParGCArrayScanChunk) {
! end = start + ParGCArrayScanChunk;
! to_obj_array->set_length(end);
! // Push the remainder before we process the range in case another
! // worker has run out of things to do and can steal it.
! oop* from_obj_p = set_partial_array_mask(from_obj);
! push_on_queue(from_obj_p);
! } else {
! assert(length == end, "sanity");
! // We'll process the final range for this object. Restore the length
! // so that the heap remains parsable in case of evacuation failure.
! to_obj_array->set_length(end);
! }
! _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
! // Process indexes [start,end). It will also process the header
! // along with the first chunk (i.e., the chunk with start == 0).
! // Note that at this point the length field of to_obj_array is not
! // correct given that we are using it to keep track of the next
! // start index. oop_iterate_range() (thankfully!) ignores the length
! // field and only relies on the start / end parameters. It does
! // however return the size of the object which will be incorrect. So
! // we have to ignore it even if we wanted to use it.
! to_obj_array->oop_iterate_range(&_scanner, start, end);
! }
// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
"Reference should not be NULL here as such are never pushed to the task queue.");
--- 1922,1932 ----
inline oop clear_partial_array_mask(oop* ref) const {
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
! inline void do_oop_partial_array(oop* p);
// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
"Reference should not be NULL here as such are never pushed to the task queue.");
*** 2058,2087 ****
}
public:
oop copy_to_survivor_space(oop const obj);
! template <class T> void deal_with_reference(T* ref_to_scan) {
! if (!has_partial_array_mask(ref_to_scan)) {
! // Note: we can use "raw" versions of "region_containing" because
! // "obj_to_scan" is definitely in the heap, and is not in a
! // humongous region.
! HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
! do_oop_evac(ref_to_scan, r);
! } else {
! do_oop_partial_array((oop*)ref_to_scan);
! }
! }
! void deal_with_reference(StarTask ref) {
! assert(verify_task(ref), "sanity");
! if (ref.is_narrow()) {
! deal_with_reference((narrowOop*)ref);
! } else {
! deal_with_reference((oop*)ref);
! }
! }
public:
void trim_queue();
};
--- 1952,1964 ----
}
public:
oop copy_to_survivor_space(oop const obj);
! template <class T> inline void deal_with_reference(T* ref_to_scan);
! inline void deal_with_reference(StarTask ref);
public:
void trim_queue();
};
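The practical consequence of declaring these methods without bodies in the .hpp, and the reason the fix touches includes as well, is that any translation unit calling them now has to see the inline definitions. A hedged caller-side sketch (the include path follows the file's location under src/share/vm; the function name example_use is purely illustrative):

    // Illustrative caller; not part of the webrev.
    #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

    static void example_use(G1CollectedHeap* g1h, oop obj) {
      if (g1h->in_cset_fast_test(obj)) {
        // The inline definition must be visible here, hence the .inline.hpp include.
      }
    }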