748 // The hot card cache for remembered set insertion optimization.
749 G1HotCardCache* _hot_card_cache;
750
751 // The g1 remembered set of the heap.
752 G1RemSet* _g1_rem_set;
753
754 // A set of cards that cover the objects for which the Rsets should be updated
755 // concurrently after the collection.
756 DirtyCardQueueSet _dirty_card_queue_set;
757
758 // After a collection pause, convert the regions in the collection set into free
759 // regions.
760 void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
761
762 // Abandon the current collection set without recording policy
763 // statistics or updating free lists.
764 void abandon_collection_set(G1CollectionSet* collection_set);
765
766 // The concurrent marker (and the thread it runs in).
767 G1ConcurrentMark* _cm;
768 G1ConcurrentMarkThread* _cmThread;
769
770 // The concurrent refiner.
771 G1ConcurrentRefine* _cr;
772
773 // The parallel task queues
774 RefToScanQueueSet *_task_queues;
775
776 // True iff an evacuation has failed in the current collection.
777 bool _evacuation_failed;
778
779 EvacuationFailedInfo* _evacuation_failed_info_array;
780
781 // Failed evacuations cause some logical from-space objects to have
782 // forwarding pointers to themselves. Reset them.
783 void remove_self_forwarding_pointers();
784
785 // Restore the objects in the regions in the collection set after an
786 // evacuation failure.
787 void restore_after_evac_failure();
|
748 // The hot card cache for remembered set insertion optimization.
749 G1HotCardCache* _hot_card_cache;
750
751 // The g1 remembered set of the heap.
752 G1RemSet* _g1_rem_set;
753
754 // A set of cards that cover the objects for which the Rsets should be updated
755 // concurrently after the collection.
756 DirtyCardQueueSet _dirty_card_queue_set;
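// --------------------------------------------------------------------------
// Illustrative sketch (not part of this header): the dirty card queues and
// the hot card cache both operate on "cards", fixed 512-byte slices of the
// heap. The hypothetical helper below shows how a card index is typically
// derived from an address; kCardShift and card_index_for() are illustrative
// names, not HotSpot identifiers.
//
//   #include <cstdint>
//   #include <cstddef>
//
//   static const int kCardShift = 9;  // 2^9 = 512 bytes per card
//
//   static size_t card_index_for(const void* heap_base, const void* addr) {
//     uintptr_t offset = (uintptr_t)addr - (uintptr_t)heap_base;
//     return (size_t)(offset >> kCardShift);  // index of the card covering addr
//   }
// --------------------------------------------------------------------------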
757
758 // After a collection pause, convert the regions in the collection set into free
759 // regions.
760 void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
761
762 // Abandon the current collection set without recording policy
763 // statistics or updating free lists.
764 void abandon_collection_set(G1CollectionSet* collection_set);
765
766 // The concurrent marker (and the thread it runs in).
767 G1ConcurrentMark* _cm;
768 G1ConcurrentMarkThread* _cm_thread;
769
770 // The concurrent refiner.
771 G1ConcurrentRefine* _cr;
772
773 // The parallel task queues
774 RefToScanQueueSet *_task_queues;
775
776 // True iff an evacuation has failed in the current collection.
777 bool _evacuation_failed;
778
779 EvacuationFailedInfo* _evacuation_failed_info_array;
780
781 // Failed evacuations cause some logical from-space objects to have
782 // forwarding pointers to themselves. Reset them.
783 void remove_self_forwarding_pointers();
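// --------------------------------------------------------------------------
// Conceptual sketch (hypothetical types, not HotSpot's): "self-forwarding"
// means that an object which could not be evacuated gets a forwarding pointer
// that points back at itself, marking it as "kept in place". After the pause,
// remove_self_forwarding_pointers() walks the affected regions and clears
// these self-references so the object headers are usable again.
//
//   struct DemoObj {                     // stand-in for an object header
//     DemoObj* forwardee = nullptr;      // non-null once forwarded
//   };
//
//   void on_evacuation_failure(DemoObj* obj) {
//     obj->forwardee = obj;              // self-forward: "I was not moved"
//   }
//
//   void reset_if_self_forwarded(DemoObj* obj) {
//     if (obj->forwardee == obj) {
//       obj->forwardee = nullptr;        // restore the header for the next cycle
//     }
//   }
// --------------------------------------------------------------------------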
784
785 // Restore the objects in the regions in the collection set after an
786 // evacuation failure.
787 void restore_after_evac_failure();
|
1235 }
1236
1237 uint old_regions_count() const { return _old_set.length(); }
1238
1239 uint humongous_regions_count() const { return _humongous_set.length(); }
1240
1241 #ifdef ASSERT
1242 bool check_young_list_empty();
1243 #endif
1244
1245 // *** Stuff related to concurrent marking. It's not clear to me that so
1246 // many of these need to be public.
1247
1248 // The functions below are helper functions that a subclass of
1249 // "CollectedHeap" can use in the implementation of its virtual
1250 // functions.
1251 // This performs a concurrent marking of the live objects in a
1252 // bitmap off to the side.
1253 void do_concurrent_mark();
1254
1255 bool isMarkedNext(oop obj) const;
1256
1257 // Determine if an object is dead, given the object and also
1258 // the region to which the object belongs. An object is dead
1259 // iff a) it was not allocated since the last mark, b) it
1260 // is not marked, and c) it is not in an archive region.
1261 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1262 return
1263 hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1264 !hr->is_archive();
1265 }
1266
1267 // This function returns true when an object has been
1268 // around since the previous marking and hasn't yet
1269 // been marked during this marking, and is not in an archive region.
1270 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1271 return
1272 !hr->obj_allocated_since_next_marking(obj) &&
1273 !isMarkedNext(obj) &&
1274 !hr->is_archive();
1275 }
1276
1277 // Determine if an object is dead, given only the object itself.
1278 // This will find the region to which the object belongs and
1279 // then call the region version of the same function.
1280
1281 // Note: if the object is NULL, it is not considered dead.
1282
1283 inline bool is_obj_dead(const oop obj) const;
1284
1285 inline bool is_obj_ill(const oop obj) const;
1286
1287 inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1288 inline bool is_obj_dead_full(const oop obj) const;
1289
1290 G1ConcurrentMark* concurrent_mark() const { return _cm; }
1291
1292 // Refinement
|
1235 }
1236
1237 uint old_regions_count() const { return _old_set.length(); }
1238
1239 uint humongous_regions_count() const { return _humongous_set.length(); }
1240
1241 #ifdef ASSERT
1242 bool check_young_list_empty();
1243 #endif
1244
1245 // *** Stuff related to concurrent marking. It's not clear to me that so
1246 // many of these need to be public.
1247
1248 // The functions below are helper functions that a subclass of
1249 // "CollectedHeap" can use in the implementation of its virtual
1250 // functions.
1251 // This performs a concurrent marking of the live objects in a
1252 // bitmap off to the side.
1253 void do_concurrent_mark();
1254
1255 bool is_marked_next(oop obj) const;
1256
1257 // Determine if an object is dead, given the object and also
1258 // the region to which the object belongs. An object is dead
1259 // iff a) it was not allocated since the last mark, b) it
1260 // is not marked, and c) it is not in an archive region.
1261 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1262 return
1263 hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1264 !hr->is_archive();
1265 }
1266
1267 // This function returns true when an object has been
1268 // around since the previous marking and hasn't yet
1269 // been marked during this marking, and is not in an archive region.
1270 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1271 return
1272 !hr->obj_allocated_since_next_marking(obj) &&
1273 !is_marked_next(obj) &&
1274 !hr->is_archive();
1275 }
1276
1277 // Determine if an object is dead, given only the object itself.
1278 // This will find the region to which the object belongs and
1279 // then call the region version of the same function.
1280
1281 // Note: if the object is NULL, it is not considered dead.
1282
1283 inline bool is_obj_dead(const oop obj) const;
1284
1285 inline bool is_obj_ill(const oop obj) const;
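// --------------------------------------------------------------------------
// Sketch of how the single-argument is_obj_dead() overload above is commonly
// implemented (the real body lives in g1CollectedHeap.inline.hpp; this is a
// paraphrase, not a copy): look up the region containing the object and
// delegate to the two-argument version, treating NULL as "not dead" per the
// comment above.
//
//   inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
//     if (obj == NULL) {
//       return false;                       // a NULL object is not dead
//     }
//     return is_obj_dead(obj, heap_region_containing(obj));
//   }
// --------------------------------------------------------------------------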
1286
1287 inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1288 inline bool is_obj_dead_full(const oop obj) const;
1289
1290 G1ConcurrentMark* concurrent_mark() const { return _cm; }
1291
1292 // Refinement
|