// NOTE(review): This chunk is a two-column, side-by-side diff rendering of a
// C++ GC header collapsed onto two physical lines. On each line, the '|'
// separates the left (older) column from the right (newer) column, and the
// embedded numbers 60-104 are the header's own line numbers, not code.
// The visible region is the interior of a remembered-set class (the enclosing
// class definition begins before and ends after this chunk), declaring:
//   - a checked-cast hook (as_CardTableRS, returns NULL by default),
//   - barrier-set accessor/mutator (bs / set_bs over the _bs field),
//   - a KlassRemSet accessor,
//   - pure-virtual iteration, write-notification, resize, and alignment hooks.
// The ONLY difference between the two columns is that the right-hand (newer)
// column adds a 'uint n_threads' parameter to younger_refs_iterate() and
// younger_refs_in_space_iterate() — presumably to support parallel young-gen
// reference iteration; TODO confirm against the enclosing file's history.
// Because every physical line fuses both file versions and multiple
// declarations, the code bytes below are intentionally left untouched.
60 // possible subtypes (but not that they are subtypes!) Return NULL if 61 // the cast is invalid. 62 virtual CardTableRS* as_CardTableRS() { return NULL; } 63 64 // Return the barrier set associated with "this." 65 BarrierSet* bs() { return _bs; } 66 67 // Set the barrier set. 68 void set_bs(BarrierSet* bs) { _bs = bs; } 69 70 KlassRemSet* klass_rem_set() { return &_klass_rem_set; } 71 72 // Do any (sequential) processing necessary to prepare for (possibly 73 // "parallel", if that arg is true) calls to younger_refs_iterate. 74 virtual void prepare_for_younger_refs_iterate(bool parallel) = 0; 75 76 // Apply the "do_oop" method of "blk" to (exactly) all oop locations 77 // 1) that are in objects allocated in "g" at the time of the last call 78 // to "save_Marks", and 79 // 2) that point to objects in younger generations. 80 virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0; 81 82 virtual void younger_refs_in_space_iterate(Space* sp, 83 OopsInGenClosure* cl) = 0; 84 85 // This method is used to notify the remembered set that "new_val" has 86 // been written into "field" by the garbage collector. 87 void write_ref_field_gc(void* field, oop new_val); 88 protected: 89 virtual void write_ref_field_gc_work(void* field, oop new_val) = 0; 90 public: 91 92 // A version of the above suitable for use by parallel collectors. 93 virtual void write_ref_field_gc_par(void* field, oop new_val) = 0; 94 95 // Resize one of the regions covered by the remembered set. 96 virtual void resize_covered_region(MemRegion new_region) = 0; 97 98 // If the rem set imposes any alignment restrictions on boundaries 99 // within the heap, this function tells whether they are met. 100 virtual bool is_aligned(HeapWord* addr) = 0; 101 102 // Returns any alignment constraint that the remembered set imposes upon the 103 // heap. | 60 // possible subtypes (but not that they are subtypes!) Return NULL if 61 // the cast is invalid. 
62 virtual CardTableRS* as_CardTableRS() { return NULL; } 63 64 // Return the barrier set associated with "this." 65 BarrierSet* bs() { return _bs; } 66 67 // Set the barrier set. 68 void set_bs(BarrierSet* bs) { _bs = bs; } 69 70 KlassRemSet* klass_rem_set() { return &_klass_rem_set; } 71 72 // Do any (sequential) processing necessary to prepare for (possibly 73 // "parallel", if that arg is true) calls to younger_refs_iterate. 74 virtual void prepare_for_younger_refs_iterate(bool parallel) = 0; 75 76 // Apply the "do_oop" method of "blk" to (exactly) all oop locations 77 // 1) that are in objects allocated in "g" at the time of the last call 78 // to "save_Marks", and 79 // 2) that point to objects in younger generations. 80 virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0; 81 82 virtual void younger_refs_in_space_iterate(Space* sp, 83 OopsInGenClosure* cl, 84 uint n_threads) = 0; 85 86 // This method is used to notify the remembered set that "new_val" has 87 // been written into "field" by the garbage collector. 88 void write_ref_field_gc(void* field, oop new_val); 89 protected: 90 virtual void write_ref_field_gc_work(void* field, oop new_val) = 0; 91 public: 92 93 // A version of the above suitable for use by parallel collectors. 94 virtual void write_ref_field_gc_par(void* field, oop new_val) = 0; 95 96 // Resize one of the regions covered by the remembered set. 97 virtual void resize_covered_region(MemRegion new_region) = 0; 98 99 // If the rem set imposes any alignment restrictions on boundaries 100 // within the heap, this function tells whether they are met. 101 virtual bool is_aligned(HeapWord* addr) = 0; 102 103 // Returns any alignment constraint that the remembered set imposes upon the 104 // heap. |