src/share/vm/memory/referenceProcessor.hpp
Print this page
rev 2661 : [mq]: g1-reference-processing
*** 46,86 ****
// (with appropriate modifications) to any "non-convex interval".
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
! class DiscoveredList;
class ReferenceProcessor : public CHeapObj {
protected:
// Compatibility with pre-4965777 JDK's
static bool _pending_list_uses_discovered_field;
MemRegion _span; // (right-open) interval of heap
// subject to wkref discovery
bool _discovering_refs; // true when discovery enabled
bool _discovery_is_atomic; // if discovery is atomic wrt
// other collectors in configuration
bool _discovery_is_mt; // true if reference discovery is MT.
// If true, setting "next" field of a discovered refs list requires
// write barrier(s). (Must be true if used in a collector in which
// elements of a discovered list may be moved during discovery: for
// example, a collector like Garbage-First that moves objects during a
// long-term concurrent marking phase that does weak reference
// discovery.)
bool _discovered_list_needs_barrier;
BarrierSet* _bs; // Cached copy of BarrierSet.
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
// reference processing is MT.
int _next_id; // round-robin mod _num_q counter in
// support of work distribution
! // For collectors that do not keep GC marking information
// in the object header, this field holds a closure that
// helps the reference processor determine the reachability
! // of an oop (the field is currently initialized to NULL for
! // all collectors but the CMS collector).
BoolObjectClosure* _is_alive_non_header;
// Soft ref clearing policies
// . the default policy
static ReferencePolicy* _default_soft_ref_policy;
--- 46,244 ----
// (with appropriate modifications) to any "non-convex interval".
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
!
! // List of discovered references.
! //
! // A list of java.lang.ref.Reference objects used by the reference
! // processor. Only one of _oop_head/_compressed_head is meaningful at
! // runtime, selected by the UseCompressedOops flag.
! class DiscoveredList {
! public:
! DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
! // Head of the list, decoded from narrowOop form if compressed oops
! // are in use.
! oop head() const {
! return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
! _oop_head;
! }
! // Address of the head slot itself (not the head oop). Which field's
! // address is returned depends on UseCompressedOops; callers must cast
! // back to the matching narrowOop*/oop* before dereferencing.
! HeapWord* adr_head() {
! return UseCompressedOops ? (HeapWord*)&_compressed_head :
! (HeapWord*)&_oop_head;
! }
! // Install a new head, encoding it when compressed oops are in use.
! void set_head(oop o) {
! if (UseCompressedOops) {
! // Must compress the head ptr.
! _compressed_head = oopDesc::encode_heap_oop(o);
! } else {
! _oop_head = o;
! }
! }
! bool is_empty() const { return head() == NULL; }
! size_t length() { return _len; }
! void set_length(size_t len) { _len = len; }
! // Asserts the length is non-zero after the increment, so inc must be
! // positive.
! void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
! // No underflow check here; callers are trusted not to remove more
! // elements than the list holds.
! void dec_length(size_t dec) { _len -= dec; }
! private:
! // Set value depending on UseCompressedOops. This could be a template class
! // but then we have to fix all the instantiations and declarations that use this class.
! oop _oop_head;
! narrowOop _compressed_head;
! size_t _len;
! };
!
! // Iterator for the list of discovered references.
! //
! // Walks a DiscoveredList, caching the addresses and values of the current
! // Reference's "discovered" and "referent" fields (filled in by load_ptrs)
! // so that the processing loops can keep objects alive, unlink entries, or
! // clear referents without re-deriving field addresses each time.
! class DiscoveredListIterator {
! private:
! DiscoveredList& _refs_list;
! HeapWord* _prev_next; // address of the slot that points at _ref:
! // initially the list head slot, afterwards
! // the previous ref's discovered field
! oop _prev; // Reference preceding _ref (NULL at head)
! oop _ref; // current Reference object
! HeapWord* _discovered_addr; // address of _ref's discovered field
! // (set by load_ptrs -- defined elsewhere)
! oop _next; // value of _ref's discovered field
! HeapWord* _referent_addr; // address of _ref's referent field
! oop _referent; // value of _ref's referent field
! OopClosure* _keep_alive; // applied to oops that must stay live
! BoolObjectClosure* _is_alive; // liveness oracle for referents
!
! DEBUG_ONLY(
! oop _first_seen; // cyclic linked list check
! )
!
! NOT_PRODUCT(
! size_t _processed;
! size_t _removed;
! )
!
! public:
! // NOTE(review): the initializer list order below (_first_seen/_processed/
! // _removed before _next) differs from the declaration order; harmless as
! // no initializer reads another member, but may trigger -Wreorder.
! inline DiscoveredListIterator(DiscoveredList& refs_list,
! OopClosure* keep_alive,
! BoolObjectClosure* is_alive):
! _refs_list(refs_list),
! _prev_next(refs_list.adr_head()),
! _prev(NULL),
! _ref(refs_list.head()),
! #ifdef ASSERT
! _first_seen(refs_list.head()),
! #endif
! #ifndef PRODUCT
! _processed(0),
! _removed(0),
! #endif
! _next(NULL),
! _keep_alive(keep_alive),
! _is_alive(is_alive)
! { }
!
! // End Of List.
! inline bool has_next() const { return _ref != NULL; }
!
! // Get oop to the Reference object.
! inline oop obj() const { return _ref; }
!
! // Get oop to the referent object.
! inline oop referent() const { return _referent; }
!
! // Returns true if referent is alive.
! inline bool is_referent_alive() const {
! return _is_alive->do_object_b(_referent);
! }
!
! // Loads data for the current reference.
! // The "allow_null_referent" argument tells us to allow for the possibility
! // of a NULL referent in the discovered Reference object. This typically
! // happens in the case of concurrent collectors that may have done the
! // discovery concurrently, or interleaved, with mutator execution.
! void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
!
! // Move to the next discovered reference.
! // Presumably load_ptrs() has been called for the current reference first,
! // since _discovered_addr and _next are consumed here -- confirm at call
! // sites.
! inline void next() {
! _prev_next = _discovered_addr;
! _prev = _ref;
! move_to_next();
! }
!
! // Remove the current reference from the list
! void remove();
!
! // Make the Reference object active again.
! void make_active();
!
! // Make the referent alive (apply _keep_alive to the referent field,
! // using the encoding that matches UseCompressedOops).
! inline void make_referent_alive() {
! if (UseCompressedOops) {
! _keep_alive->do_oop((narrowOop*)_referent_addr);
! } else {
! _keep_alive->do_oop((oop*)_referent_addr);
! }
! }
!
! // Update the discovered field.
! inline void update_discovered() {
! // First _prev_next ref actually points into DiscoveredList (gross).
! if (UseCompressedOops) {
! if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
! _keep_alive->do_oop((narrowOop*)_prev_next);
! }
! } else {
! if (!oopDesc::is_null(*(oop*)_prev_next)) {
! _keep_alive->do_oop((oop*)_prev_next);
! }
! }
! }
!
! // NULL out referent pointer.
! void clear_referent();
!
! // Statistics
! NOT_PRODUCT(
! inline size_t processed() const { return _processed; }
! inline size_t removed() const { return _removed; }
! )
!
! // Advance _ref to _next. A reference whose discovered field points back
! // at itself (_ref == _next) marks the end of the list; the debug-only
! // assert catches a genuine cycle (revisiting the first element).
! inline void move_to_next() {
! if (_ref == _next) {
! // End of the list.
! _ref = NULL;
! } else {
! _ref = _next;
! }
! assert(_ref != _first_seen, "cyclic ref_list found");
! NOT_PRODUCT(_processed++);
! }
!
! };
class ReferenceProcessor : public CHeapObj {
protected:
// Compatibility with pre-4965777 JDK's
static bool _pending_list_uses_discovered_field;
+
MemRegion _span; // (right-open) interval of heap
// subject to wkref discovery
+
bool _discovering_refs; // true when discovery enabled
bool _discovery_is_atomic; // if discovery is atomic wrt
// other collectors in configuration
bool _discovery_is_mt; // true if reference discovery is MT.
+
// If true, setting "next" field of a discovered refs list requires
// write barrier(s). (Must be true if used in a collector in which
// elements of a discovered list may be moved during discovery: for
// example, a collector like Garbage-First that moves objects during a
// long-term concurrent marking phase that does weak reference
// discovery.)
bool _discovered_list_needs_barrier;
+
BarrierSet* _bs; // Cached copy of BarrierSet.
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
// reference processing is MT.
int _next_id; // round-robin mod _num_q counter in
// support of work distribution
! // For collectors that do not keep GC liveness information
// in the object header, this field holds a closure that
// helps the reference processor determine the reachability
! // of an oop. It is currently initialized to NULL for all
! // collectors except for CMS and G1.
BoolObjectClosure* _is_alive_non_header;
// Soft ref clearing policies
// . the default policy
static ReferencePolicy* _default_soft_ref_policy;
*** 100,109 ****
--- 258,269 ----
DiscoveredList* _discoveredWeakRefs;
DiscoveredList* _discoveredFinalRefs;
DiscoveredList* _discoveredPhantomRefs;
public:
+ // Number of java.lang.ref.Reference subclasses for which discovered
+ // lists are kept (soft, weak, final, phantom). Relies on the
+ // ReferenceType enum values being contiguous from REF_OTHER through
+ // REF_PHANTOM -- the enum is not visible here; confirm against
+ // referenceType.hpp.
+ static int subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+
int num_q() { return _num_q; }
int max_num_q() { return _max_num_q; }
void set_active_mt_degree(int v) { _num_q = v; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
ReferencePolicy* setup_policy(bool always_clear) {