789 // The g1 remembered set of the heap.
790 G1RemSet* _g1_rem_set;
791
792 // A set of cards that cover the objects for which the Rsets should be updated
793 // concurrently after the collection.
794 DirtyCardQueueSet _dirty_card_queue_set;
795
796 // After a collection pause, convert the regions in the collection set into free
797 // regions.
798 void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
799
800 // Abandon the current collection set without recording policy
801 // statistics or updating free lists.
802 void abandon_collection_set(G1CollectionSet* collection_set);
803
804 // The concurrent marker (and the thread it runs in).
805 G1ConcurrentMark* _cm;
806 ConcurrentMarkThread* _cmThread;
807
808 // The concurrent refiner.
809 G1ConcurrentRefine* _cg1r;
810
811 // The parallel task queues.
812 RefToScanQueueSet *_task_queues;
813
814 // True iff an evacuation has failed in the current collection.
815 bool _evacuation_failed;
816
817 EvacuationFailedInfo* _evacuation_failed_info_array; // NOTE(review): presumably one entry per worker thread — confirm at the allocation site.
818
819 // Failed evacuations cause some logical from-space objects to have
820 // forwarding pointers to themselves. Reset them.
821 void remove_self_forwarding_pointers();
822
823 // Restore the objects in the regions in the collection set after an
824 // evacuation failure.
825 void restore_after_evac_failure();
826
827 PreservedMarksSet _preserved_marks_set; // NOTE(review): presumably the marks saved before self-forwarding overwrites them (see remove_self_forwarding_pointers above) — confirm.
828
829 // Preserve the mark of "obj", if necessary, in preparation for its mark
1372 return
1373 !hr->obj_allocated_since_next_marking(obj) &&
1374 !isMarkedNext(obj) &&
1375 !hr->is_archive();
1376 }
1377
1378 // Determine if an object is dead, given only the object itself.
1379 // This will find the region to which the object belongs and
1380 // then call the region version of the same function.
1381
1382 // Note: if the object is NULL it is not considered dead.
1383
1384 inline bool is_obj_dead(const oop obj) const;
1385
1386 inline bool is_obj_ill(const oop obj) const;
1387
1388 G1ConcurrentMark* concurrent_mark() const { return _cm; } // Accessor for the concurrent marker.
1389
1390 // Refinement
1391
1392 G1ConcurrentRefine* concurrent_g1_refine() const { return _cg1r; } // Accessor for the concurrent refiner.
1393
1394 // Optimized nmethod scanning support routines
1395
1396 // Is an oop scavengeable?
1397 virtual bool is_scavengable(oop obj);
1398
1399 // Register the given nmethod with the G1 heap.
1400 virtual void register_nmethod(nmethod* nm);
1401
1402 // Unregister the given nmethod from the G1 heap.
1403 virtual void unregister_nmethod(nmethod* nm);
1404
1405 // Free up superfluous code root memory.
1406 void purge_code_root_memory();
1407
1408 // Rebuild the strong code root lists for each region
1409 // after a full GC.
1410 void rebuild_strong_code_roots();
1411
1412 // Partial cleaning used when class unloading is disabled.
|
789 // The g1 remembered set of the heap.
790 G1RemSet* _g1_rem_set;
791
792 // A set of cards that cover the objects for which the Rsets should be updated
793 // concurrently after the collection.
794 DirtyCardQueueSet _dirty_card_queue_set;
795
796 // After a collection pause, convert the regions in the collection set into free
797 // regions.
798 void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
799
800 // Abandon the current collection set without recording policy
801 // statistics or updating free lists.
802 void abandon_collection_set(G1CollectionSet* collection_set);
803
804 // The concurrent marker (and the thread it runs in).
805 G1ConcurrentMark* _cm;
806 ConcurrentMarkThread* _cmThread;
807
808 // The concurrent refiner.
809 G1ConcurrentRefine* _cr;
810
811 // The parallel task queues.
812 RefToScanQueueSet *_task_queues;
813
814 // True iff an evacuation has failed in the current collection.
815 bool _evacuation_failed;
816
817 EvacuationFailedInfo* _evacuation_failed_info_array; // NOTE(review): presumably one entry per worker thread — confirm at the allocation site.
818
819 // Failed evacuations cause some logical from-space objects to have
820 // forwarding pointers to themselves. Reset them.
821 void remove_self_forwarding_pointers();
822
823 // Restore the objects in the regions in the collection set after an
824 // evacuation failure.
825 void restore_after_evac_failure();
826
827 PreservedMarksSet _preserved_marks_set; // NOTE(review): presumably the marks saved before self-forwarding overwrites them (see remove_self_forwarding_pointers above) — confirm.
828
829 // Preserve the mark of "obj", if necessary, in preparation for its mark
1372 return
1373 !hr->obj_allocated_since_next_marking(obj) &&
1374 !isMarkedNext(obj) &&
1375 !hr->is_archive();
1376 }
1377
1378 // Determine if an object is dead, given only the object itself.
1379 // This will find the region to which the object belongs and
1380 // then call the region version of the same function.
1381
1382 // Note: if the object is NULL it is not considered dead.
1383
1384 inline bool is_obj_dead(const oop obj) const;
1385
1386 inline bool is_obj_ill(const oop obj) const;
1387
1388 G1ConcurrentMark* concurrent_mark() const { return _cm; } // Accessor for the concurrent marker.
1389
1390 // Refinement
1391
1392 G1ConcurrentRefine* concurrent_refine() const { return _cr; } // Accessor for the concurrent refiner.
1393
1394 // Optimized nmethod scanning support routines
1395
1396 // Is an oop scavengeable?
1397 virtual bool is_scavengable(oop obj);
1398
1399 // Register the given nmethod with the G1 heap.
1400 virtual void register_nmethod(nmethod* nm);
1401
1402 // Unregister the given nmethod from the G1 heap.
1403 virtual void unregister_nmethod(nmethod* nm);
1404
1405 // Free up superfluous code root memory.
1406 void purge_code_root_memory();
1407
1408 // Rebuild the strong code root lists for each region
1409 // after a full GC.
1410 void rebuild_strong_code_roots();
1411
1412 // Partial cleaning used when class unloading is disabled.
|