src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
rev 5732 : [mq]: comments2


1356   // Iterate over all objects, calling "cl.do_object" on each.
1357   virtual void object_iterate(ObjectClosure* cl);
1358 
1359   virtual void safe_object_iterate(ObjectClosure* cl) {
1360     object_iterate(cl);
1361   }
1362 
1363   // Iterate over all spaces in use in the heap, in ascending address order.
1364   virtual void space_iterate(SpaceClosure* cl);
1365 
1366   // Iterate over heap regions, in address order, terminating the
1367   // iteration early if the "doHeapRegion" method returns "true".
1368   void heap_region_iterate(HeapRegionClosure* blk) const;
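
A minimal usage sketch of the closure contract described above, assuming only what the comment states; the closure name and the is_empty() test are illustrative and not taken from this file:

class FindFirstEmptyRegion : public HeapRegionClosure {
 public:
  HeapRegion* _found;
  FindFirstEmptyRegion() : _found(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty()) {
      _found = r;
      return true;    // returning "true" terminates the iteration early
    }
    return false;     // keep iterating
  }
};
// Typical call: FindFirstEmptyRegion cl; g1h->heap_region_iterate(&cl);
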
1369 
1370   // Return the region with the given index. It assumes the index is valid.
1371   HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1372 
1373   // Divide the heap region sequence into "chunks" of some size (the number
1374   // of regions divided by the number of parallel threads times some
1375   // overpartition factor, currently 4).  Assumes that this will be called
1376   // in parallel by ParallelGCThreads worker threads with distinct worker
1377   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1378   // calls will use the same "claim_value", and that that claim value is
1379   // different from the claim_value of any heap region before the start of
1380   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1381   // attempting to claim the first region in each chunk, and, if
1382   // successful, applying the closure to each region in the chunk (and
1383   // setting the claim value of the second and subsequent regions of the
1384   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1385   // i.e., that a closure never attempt to abort a traversal.
1386   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1387                                        uint worker,
1388                                        uint no_of_par_workers,
1389                                        jint claim_value);
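
A hedged sketch of how a worker thread might drive the chunked variant; the task function, the closure, and the claim-value constant below are illustrative assumptions, not code from this file:

static const jint MyPhaseClaimValue = 7;   // hypothetical claim value for this phase

class ProcessRegionClosure : public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    // ... per-region work ...
    return false;   // must never abort, as required by the comment above
  }
};

void some_par_phase_work(G1CollectedHeap* g1h, uint worker_id, uint active_workers) {
  ProcessRegionClosure cl;
  // Every worker passes the same claim value, which must differ from the value
  // the regions carried before this phase; see reset_heap_region_claim_values().
  g1h->heap_region_par_iterate_chunked(&cl, worker_id, active_workers, MyPhaseClaimValue);
}
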
1390 
1391   // It resets all the region claim values to the default.
1392   void reset_heap_region_claim_values();
1393 
1394   // Resets the claim values of regions in the current
1395   // collection set to the default.
1396   void reset_cset_heap_region_claim_values();


1501   }
1502 
1503 #ifdef ASSERT
1504   virtual bool is_in_partial_collection(const void* p);
1505 #endif
1506 
1507   virtual bool is_scavengable(const void* addr);
1508 
1509   // We don't need barriers for initializing stores to objects
1510   // in the young gen: for the SATB pre-barrier, there is no
1511   // pre-value that needs to be remembered; for the remembered-set
1512   // update logging post-barrier, we don't maintain remembered set
1513   // information for young gen objects.
1514   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1515     return is_in_young(new_obj);
1516   }
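
A hedged illustration of the intent; the helper below is hypothetical and is not the actual compiler/runtime interface that consults this predicate:

void initialize_field(G1CollectedHeap* g1h, oop new_obj, oop* field_addr, oop value) {
  if (g1h->can_elide_initializing_store_barrier(new_obj)) {
    *field_addr = value;   // raw store: no SATB pre-barrier, no RS-update post-barrier needed
  } else {
    // otherwise emit the usual G1 pre-barrier, the store, and the post-barrier
    *field_addr = value;
  }
}
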
1517 
1518   // Returns "true" iff the given word_size is "very large".
1519   static bool isHumongous(size_t word_size) {
1520     // Note this has to be strictly greater-than as the TLABs
1521     // are capped at the humongous threshold and we want to
1522     // ensure that we don't try to allocate a TLAB as
1523     // humongous and that we don't allocate a humongous
1524     // object in a TLAB.
1525     return word_size > _humongous_object_threshold_in_words;
1526   }
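
Illustrative arithmetic only; the half-region threshold and the 1 MB region size are assumptions made for this example, not values read from this file:

//   region size          = 1 MB = 131072 words (64-bit, 8-byte HeapWords)
//   humongous threshold  = 65536 words (assumed: half a region)
//   maximum TLAB size    = 65536 words (capped at the threshold)
//
//   isHumongous(65536) == false   // a maximally sized TLAB request is not humongous
//   isHumongous(65537) == true    // only strictly larger requests are
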
1527 
1528   // Update mod union table with the set of dirty cards.
1529   void updateModUnion();
1530 
1531   // Set the mod union bits corresponding to the given memRegion.  Note
1532   // that this is always a safe operation, since it doesn't clear any
1533   // bits.
1534   void markModUnionRange(MemRegion mr);
1535 
1536   // Records the fact that a marking phase is no longer in progress.
1537   void set_marking_complete() {
1538     _mark_in_progress = false;
1539   }
1540   void set_marking_started() {
1541     _mark_in_progress = true;


1631   const char* top_at_mark_start_str(VerifyOption vo);
1632 
1633   ConcurrentMark* concurrent_mark() const { return _cm; }
1634 
1635   // Refinement
1636 
1637   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1638 
1639   // The dirty cards region list is used to record a subset of regions
1640   // whose cards need clearing. The list is populated during the
1641   // remembered set scanning and drained during the card table
1642   // cleanup. Although the methods are reentrant, population/draining
1643   // phases must not overlap. For synchronization purposes the last
1644   // element on the list points to itself.
1645   HeapRegion* _dirty_cards_region_list;
1646   void push_dirty_cards_region(HeapRegion* hr);
1647   HeapRegion* pop_dirty_cards_region();
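
A simplified, hedged sketch of how the self-pointing last element could be maintained on push; this is not the actual method body, and the next-region accessor on HeapRegion is assumed for illustration:

void push_dirty_cards_region_sketch(G1CollectedHeap* g1h, HeapRegion* hr) {
  HeapRegion* head;
  do {
    head = g1h->_dirty_cards_region_list;
    // A region that becomes the last element must point to itself, never to NULL,
    // so that "on the list" and "off the list" remain distinguishable.
    hr->set_next_dirty_cards_region(head == NULL ? hr : head);
  } while ((HeapRegion*)Atomic::cmpxchg_ptr(hr, &g1h->_dirty_cards_region_list, head) != head);
}
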
1648 
1649   // Optimized nmethod scanning support routines
1650 
1651   // Register the given nmethod with the G1 heap.
1652   virtual void register_nmethod(nmethod* nm);
1653 
1654   // Unregister the given nmethod from the G1 heap.
1655   virtual void unregister_nmethod(nmethod* nm);
1656 
1657   // Migrate the nmethods in the code root lists of the regions
1658   // in the collection set to regions in to-space. In the event
1659   // of an evacuation failure, nmethods that reference objects
1660   // that were not successfully evacuated are not migrated.
1661   void migrate_strong_code_roots();
1662 
1663   // During an initial mark pause, mark all the code roots that
1664   // point into regions *not* in the collection set.
1665   void mark_strong_code_roots(uint worker_id);
1666 
1667   // Rebuild the strong code root lists for each region
1668   // after a full GC.
1669   void rebuild_strong_code_roots();
1670 
1671   // Verification
1672 
1673   // The following is just to alert the verification code
1674   // that a full collection has occurred and that the
1675   // remembered sets are no longer up to date.
1676   bool _full_collection;
1677   void set_full_collection() { _full_collection = true;}
1678   void clear_full_collection() {_full_collection = false;}
1679   bool full_collection() {return _full_collection;}
1680 
1681   // Perform any cleanup actions necessary before allowing a verification.
1682   virtual void prepare_for_verify();
1683 
1684   // Perform verification.
1685 
1686   // vo == UsePrevMarking -> use "prev" marking information,
1687   // vo == UseNextMarking -> use "next" marking information,
1688   // vo == UseMarkWord    -> use the mark word in the object header
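
Hedged usage sketch: assuming a verify() overload that accepts one of these options (its declaration is not shown in this excerpt), a caller would pair it with prepare_for_verify(); the enumerator spelling below follows the shorthand in the comment and may differ from the real enum:

// g1h->prepare_for_verify();
// g1h->verify(/* silent */ false, /* vo = */ UsePrevMarking);
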

