src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 6166 : 8038498: Fix includes and C inlining after 8035330


 689   bool expand(size_t expand_bytes);
 690 
 691   // Do anything common to GCs.
 692   virtual void gc_prologue(bool full);
 693   virtual void gc_epilogue(bool full);
 694 
 695   // We register a region with the fast "in collection set" test. We
 696   // simply set to true the array slot corresponding to this region.
 697   void register_region_with_in_cset_fast_test(HeapRegion* r) {
 698     assert(_in_cset_fast_test_base != NULL, "sanity");
 699     assert(r->in_collection_set(), "invariant");
 700     uint index = r->hrs_index();
 701     assert(index < _in_cset_fast_test_length, "invariant");
 702     assert(!_in_cset_fast_test_base[index], "invariant");
 703     _in_cset_fast_test_base[index] = true;
 704   }
 705 
 706   // This is a fast test on whether a reference points into the
 707   // collection set or not. Assume that the reference
 708   // points into the heap.
 709   bool in_cset_fast_test(oop obj) {
 710     assert(_in_cset_fast_test != NULL, "sanity");
 711     assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
 712     // no need to subtract the bottom of the heap from obj,
 713     // _in_cset_fast_test is biased
 714     uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
 715     bool ret = _in_cset_fast_test[index];
 716     // let's make sure the result is consistent with what the slower
 717     // test returns
 718     assert( ret || !obj_in_cs(obj), "sanity");
 719     assert(!ret ||  obj_in_cs(obj), "sanity");
 720     return ret;
 721   }
 722 
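The fast test above shifts the raw object address by the region-size log and indexes a bool array whose base pointer has the heap bottom pre-subtracted ("biased"), so the hot path is a shift plus a load. A minimal standalone sketch of that biasing trick follows; the struct, the field names, and the 1 MB region size are assumptions for illustration, not HotSpot code.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for the biased "in collection set" table.
    struct BiasedInCSetTable {
      static const int log2_region_bytes = 20;   // assume 1 MB regions
      bool* _table_base;     // one bool per region; what we allocate/free
      bool* _table_biased;   // _table_base minus (heap_bottom >> shift)

      void initialize(uintptr_t heap_bottom, size_t num_regions) {
        _table_base   = new bool[num_regions]();   // all false
        // Fold the heap bottom into the table pointer once, so lookups can
        // shift the raw address directly, as in_cset_fast_test() does.
        _table_biased = _table_base - (heap_bottom >> log2_region_bytes);
      }
      void set(uintptr_t region_start) {
        _table_biased[region_start >> log2_region_bytes] = true;
      }
      bool contains(uintptr_t addr) const {
        return _table_biased[addr >> log2_region_bytes];
      }
    };
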
 723   void clear_cset_fast_test() {
 724     assert(_in_cset_fast_test_base != NULL, "sanity");
 725     memset(_in_cset_fast_test_base, false,
 726            (size_t) _in_cset_fast_test_length * sizeof(bool));
 727   }
 728 
 729   // This is called at the start of either a concurrent cycle or a Full
 730   // GC to update the number of old marking cycles started.
 731   void increment_old_marking_cycles_started();
 732 
 733   // This is called at the end of either a concurrent cycle or a Full
 734   // GC to update the number of old marking cycles completed. Those two
 735   // can happen in a nested fashion, i.e., we start a concurrent
 736   // cycle, a Full GC happens half-way through it which ends first,
 737   // and then the cycle notices that a Full GC happened and ends
 738   // too. The concurrent parameter is a boolean to help us do a bit
 739   // tighter consistency checking in the method. If concurrent is
 740   // false, the caller is the inner caller in the nesting (i.e., the
 741   // Full GC). If concurrent is true, the caller is the outer caller


1233   // Wrapper for the region list operations that can be called from
1234   // methods outside this class.
1235 
1236   void secondary_free_list_add(FreeRegionList* list) {
1237     _secondary_free_list.add_ordered(list);
1238   }
1239 
1240   void append_secondary_free_list() {
1241     _free_list.add_ordered(&_secondary_free_list);
1242   }
1243 
1244   void append_secondary_free_list_if_not_empty_with_lock() {
1245     // If the secondary free list looks empty there's no reason to
1246     // take the lock and then try to append it.
1247     if (!_secondary_free_list.is_empty()) {
1248       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1249       append_secondary_free_list();
1250     }
1251   }
1252 
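append_secondary_free_list_if_not_empty_with_lock() checks the list before taking SecondaryFreeList_lock: the common empty case touches no lock, and a racy miss only means a later caller does the append. A rough sketch of the same check-then-lock shape using standard-library stand-ins (the real code uses the VM's own mutex and region lists, and relies on the emptiness check being safe to perform unlocked):

    #include <list>
    #include <mutex>

    std::list<int> secondary_free_list;   // stand-in for the region list
    std::list<int> free_list;
    std::mutex     secondary_free_list_lock;

    void append_secondary_free_list_if_not_empty_with_lock() {
      // Unlocked emptiness check: if it races with a concurrent add we may
      // skip an append, but the regions stay on the secondary list and the
      // next caller picks them up. Avoids locking in the common empty case.
      if (!secondary_free_list.empty()) {
        std::lock_guard<std::mutex> x(secondary_free_list_lock);
        free_list.splice(free_list.end(), secondary_free_list);
      }
    }
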
1253   void old_set_remove(HeapRegion* hr) {
1254     _old_set.remove(hr);
1255   }
1256 
1257   size_t non_young_capacity_bytes() {
1258     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1259   }
1260 
1261   void set_free_regions_coming();
1262   void reset_free_regions_coming();
1263   bool free_regions_coming() { return _free_regions_coming; }
1264   void wait_while_free_regions_coming();
1265 
1266   // Determine whether the given region is one that we are using as an
1267   // old GC alloc region.
1268   bool is_old_gc_alloc_region(HeapRegion* hr) {
1269     return hr == _retained_old_gc_alloc_region;
1270   }
1271 
1272   // Perform a collection of the heap; intended for use in implementing
1273   // "System.gc".  This probably implies as full a collection as the
1274   // "CollectedHeap" supports.
1275   virtual void collect(GCCause::Cause cause);


1326   virtual void oop_iterate(ExtendedOopClosure* cl);
1327 
1328   // Same as above, restricted to a memory region.
1329   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1330 
1331   // Iterate over all objects, calling "cl.do_object" on each.
1332   virtual void object_iterate(ObjectClosure* cl);
1333 
1334   virtual void safe_object_iterate(ObjectClosure* cl) {
1335     object_iterate(cl);
1336   }
1337 
1338   // Iterate over all spaces in use in the heap, in ascending address order.
1339   virtual void space_iterate(SpaceClosure* cl);
1340 
1341   // Iterate over heap regions, in address order, terminating the
1342   // iteration early if the "doHeapRegion" method returns "true".
1343   void heap_region_iterate(HeapRegionClosure* blk) const;
1344 
1345   // Return the region with the given index. It assumes the index is valid.
1346   HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1347 
1348   // Divide the heap region sequence into "chunks" of some size (the number
1349   // of regions divided by the number of parallel threads times some
1350   // overpartition factor, currently 4).  Assumes that this will be called
1351   // in parallel by ParallelGCThreads worker threads with distinct worker
1352   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1353   // calls will use the same "claim_value", and that that claim value is
1354   // different from the claim_value of any heap region before the start of
1355   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1356   // attempting to claim the first region in each chunk, and, if
1357   // successful, applying the closure to each region in the chunk (and
1358   // setting the claim value of the second and subsequent regions of the
1359   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1360   // i.e., that a closure never attempt to abort a traversal.
1361   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1362                                        uint worker,
1363                                        uint no_of_par_workers,
1364                                        jint claim_value);
1365 
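The comment above describes the claim protocol rather than showing it, so here is a minimal sketch of the idea: every worker walks the chunks, a CAS on the first region of a chunk picks the single winner, and the winner stamps the rest of the chunk with the same claim value. The Region struct and std::atomic claim word are assumptions; the real code goes through HeapRegion's claim API.

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <vector>

    struct Region { std::atomic<uint32_t> claim{0}; };

    void par_iterate_chunked(std::vector<Region>& regions,
                             uint32_t claim_value, size_t chunk_size) {
      for (size_t start = 0; start < regions.size(); start += chunk_size) {
        // Claim the chunk by claiming its first region.
        uint32_t expected = regions[start].claim.load();
        if (expected == claim_value) continue;            // already claimed
        if (!regions[start].claim.compare_exchange_strong(expected, claim_value))
          continue;                                        // another worker won
        size_t end = std::min(start + chunk_size, regions.size());
        for (size_t i = start; i < end; i++) {
          if (i != start) regions[i].claim.store(claim_value);
          // ... blk->doHeapRegion(&regions[i]) would run here ...
        }
      }
    }
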
1366   // It resets all the region claim values to the default.


1455   // This permission only extends from the creation of a new object
1456   // via a TLAB up to the first subsequent safepoint. If such permission
1457   // is granted for this heap type, the compiler promises to call
1458   // defer_store_barrier() below on any slow path allocation of
1459   // a new object for which such initializing store barriers will
1460   // have been elided. G1, like CMS, allows this, but should be
1461   // ready to provide a compensating write barrier as necessary
1462   // if that storage came out of a non-young region. The efficiency
1463   // of this implementation depends crucially on being able to
1464   // answer very efficiently in constant time whether a piece of
1465   // storage in the heap comes from a young region or not.
1466   // See ReduceInitialCardMarks.
1467   virtual bool can_elide_tlab_store_barriers() const {
1468     return true;
1469   }
1470 
1471   virtual bool card_mark_must_follow_store() const {
1472     return true;
1473   }
1474 
1475   bool is_in_young(const oop obj) {
1476     HeapRegion* hr = heap_region_containing(obj);
1477     return hr != NULL && hr->is_young();
1478   }
1479 
1480 #ifdef ASSERT
1481   virtual bool is_in_partial_collection(const void* p);
1482 #endif
1483 
1484   virtual bool is_scavengable(const void* addr);
1485 
1486   // We don't need barriers for initializing stores to objects
1487   // in the young gen: for the SATB pre-barrier, there is no
1488   // pre-value that needs to be remembered; for the remembered-set
1489   // update logging post-barrier, we don't maintain remembered set
1490   // information for young gen objects.
1491   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1492     return is_in_young(new_obj);
1493   }
1494 
1495   // Returns "true" iff the given word_size is "very large".
1496   static bool isHumongous(size_t word_size) {
1497     // Note this has to be strictly greater-than as the TLABs
1498     // are capped at the humongous threshold and we want to
1499     // ensure that we don't try to allocate a TLAB as
1500     // humongous and that we don't allocate a humongous
1501     // object in a TLAB.
1502     return word_size > _humongous_object_threshold_in_words;
1503   }
1504 
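A quick worked example of the strictly-greater-than test, assuming the usual G1 definition of the threshold as half a region (the real value comes from _humongous_object_threshold_in_words) and a 1 MB region on a 64-bit VM:

    #include <cstddef>
    #include <cstdio>

    static const size_t region_words    = 1024 * 1024 / sizeof(void*); // 131072
    static const size_t threshold_words = region_words / 2;            // 65536

    static bool is_humongous(size_t word_size) {
      // Strictly greater-than: a TLAB capped at exactly the threshold must
      // not be classified as humongous, matching isHumongous() above.
      return word_size > threshold_words;
    }

    int main() {
      printf("%d %d\n", is_humongous(threshold_words),       // 0: at the cap
                        is_humongous(threshold_words + 1));  // 1: just over
      return 0;
    }
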
1505   // Update mod union table with the set of dirty cards.
1506   void updateModUnion();
1507 
1508   // Set the mod union bits corresponding to the given memRegion.  Note
1509   // that this is always a safe operation, since it doesn't clear any
1510   // bits.
1511   void markModUnionRange(MemRegion mr);
1512 
1513   // Records the fact that a marking phase is no longer in progress.


1567       !hr->obj_allocated_since_prev_marking(obj) &&
1568       !isMarkedPrev(obj);
1569   }
1570 
1571   // This function returns true when an object was allocated before the
1572   // start of the current marking (the "next" marking) and has not yet
1573   // been marked by it.
1574 
1575   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1576     return
1577       !hr->obj_allocated_since_next_marking(obj) &&
1578       !isMarkedNext(obj);
1579   }
1580 
1581   // Determine if an object is dead, given only the object itself.
1582   // This will find the region to which the object belongs and
1583   // then call the region version of the same function.
1584 
1585   // Note: if the object is NULL it is not considered dead.
1586 
1587   bool is_obj_dead(const oop obj) const {
1588     const HeapRegion* hr = heap_region_containing(obj);
1589     if (hr == NULL) {
1590       if (obj == NULL) return false;
1591       else return true;
1592     }
1593     else return is_obj_dead(obj, hr);
1594   }
1595 
1596   bool is_obj_ill(const oop obj) const {
1597     const HeapRegion* hr = heap_region_containing(obj);
1598     if (hr == NULL) {
1599       if (obj == NULL) return false;
1600       else return true;
1601     }
1602     else return is_obj_ill(obj, hr);
1603   }
1604 
1605   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1606   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1607   bool is_marked(oop obj, VerifyOption vo);
1608   const char* top_at_mark_start_str(VerifyOption vo);
1609 
1610   ConcurrentMark* concurrent_mark() const { return _cm; }
1611 
1612   // Refinement
1613 
1614   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1615 
1616   // The dirty cards region list is used to record a subset of regions
1617   // whose cards need clearing. The list is populated during the
1618   // remembered set scanning and drained during the card table
1619   // cleanup. Although the methods are reentrant, population/draining
1620   // phases must not overlap. For synchronization purposes the last
1621   // element on the list points to itself.
1622   HeapRegion* _dirty_cards_region_list;
1623   void push_dirty_cards_region(HeapRegion* hr);
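The self-pointing tail means a region's "next dirty region" link is never NULL while it is on the list, so NULL can double as "not on the list" and the membership test is a single load. A simplified sketch of that convention with a hypothetical next_dirty field (the real code links HeapRegions and has to reason more carefully about concurrent duplicate pushes):

    #include <atomic>

    struct Region {
      std::atomic<Region*> next_dirty{nullptr};  // nullptr => not on the list
    };                                           // == this => tail of the list

    std::atomic<Region*> dirty_cards_region_list{nullptr};

    void push_dirty_cards_region(Region* hr) {
      if (hr->next_dirty.load() != nullptr) {
        return;  // already on the list (possibly as the self-pointing tail)
      }
      Region* head = dirty_cards_region_list.load();
      do {
        // The tail points to itself, never to nullptr, which keeps the
        // membership test above a single load.
        hr->next_dirty.store(head != nullptr ? head : hr);
      } while (!dirty_cards_region_list.compare_exchange_weak(head, hr));
    }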


1677   // consistent most of the time, so most calls to this should use
1678   // vo == UsePrevMarking.
1679   // Currently, there is only one case where this is called with
1680   // vo == UseNextMarking, which is to verify the "next" marking
1681   // information at the end of remark.
1682   // Currently there is only one place where this is called with
1683   // vo == UseMarkWord, which is to verify the marking during a
1684   // full GC.
1685   void verify(bool silent, VerifyOption vo);
1686 
1687   // Override; it uses the "prev" marking information
1688   virtual void verify(bool silent);
1689 
1690   // The methods below are here for convenience and dispatch the
1691   // appropriate method depending on value of the given VerifyOption
1692   // parameter. The values for that parameter, and their meanings,
1693   // are the same as those above.
1694 
1695   bool is_obj_dead_cond(const oop obj,
1696                         const HeapRegion* hr,
1697                         const VerifyOption vo) const {
1698     switch (vo) {
1699     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
1700     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
1701     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1702     default:                            ShouldNotReachHere();
1703     }
1704     return false; // keep some compilers happy
1705   }
1706 
1707   bool is_obj_dead_cond(const oop obj,
1708                         const VerifyOption vo) const {
1709     switch (vo) {
1710     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
1711     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
1712     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1713     default:                            ShouldNotReachHere();
1714     }
1715     return false; // keep some compilers happy
1716   }
1717 
1718   // Printing
1719 
1720   virtual void print_on(outputStream* st) const;
1721   virtual void print_extended_on(outputStream* st) const;
1722   virtual void print_on_error(outputStream* st) const;
1723 
1724   virtual void print_gc_threads_on(outputStream* st) const;
1725   virtual void gc_threads_do(ThreadClosure* tc) const;
1726 
1727   // Override
1728   void print_tracing_info() const;
1729 
1730   // The following two methods are helpful for debugging RSet issues.
1731   void print_cset_rsets() PRODUCT_RETURN;
1732   void print_all_rsets() PRODUCT_RETURN;
1733 
1734 public:
1735   void stop_conc_gc_threads();
1736 


1790   double _start_strong_roots;
1791   double _strong_roots_time;
1792   double _start_term;
1793   double _term_time;
1794 
1795   // Map from young-age-index (0 == not young, 1 is youngest) to
1796   // surviving words. base is what we get back from the malloc call
1797   size_t* _surviving_young_words_base;
1798   // this points into the array, as we use the first few entries for padding
1799   size_t* _surviving_young_words;
1800 
1801 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1802 
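_surviving_young_words is the malloc'd base plus PADDING_ELEM_NUM entries, so each worker's counters start a cache line away from the preceding allocation and GC workers do not false-share. A sketch of the layout with an assumed 64-byte cache line and hypothetical names:

    #include <cstdlib>
    #include <cstring>

    static const size_t cache_line_bytes = 64;  // assumption for the sketch
    static const size_t padding_elems    = cache_line_bytes / sizeof(size_t);

    struct SurvivingYoungWords {
      size_t* _base;    // what malloc returned; this is what gets freed
      size_t* _words;   // what the GC code indexes; one cache line in

      void initialize(size_t length) {
        size_t total = padding_elems + length;
        _base  = static_cast<size_t*>(malloc(total * sizeof(size_t)));
        memset(_base, 0, total * sizeof(size_t));
        _words = _base + padding_elems;   // skip the padding entries
      }
      void release() { free(_base); }
    };
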
1803   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1804 
1805   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
1806 
1807   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
1808   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
1809 
1810   template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1811     if (!from->is_survivor()) {
1812       _g1_rem->par_write_ref(from, p, tid);
1813     }
1814   }
1815 
1816   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1817     // If the new value of the field points to the same region or
1818     // is the to-space, we don't need to include it in the Rset updates.
1819     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1820       size_t card_index = ctbs()->index_for(p);
1821       // If the card hasn't been added to the buffer, do it.
1822       if (ctbs()->mark_card_deferred(card_index)) {
1823         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1824       }
1825     }
1826   }
1827 
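deferred_rs_update() skips fields whose target is in the same region or in a survivor region, then marks the card deferred and enqueues its address only the first time the mark succeeds, so each card is refined at most once. A heavily simplified, single-threaded sketch of that "mark once, enqueue once" filter; the byte-per-512-byte-card table and all names are assumptions, and the real mark_card_deferred() works atomically on the card values:

    #include <cstddef>
    #include <cstdint>
    #include <queue>

    static const int            card_shift = 9;          // 512-byte cards
    static uint8_t              card_table[1 << 20];     // one byte per card
    static std::queue<uint8_t*> dirty_card_queue;
    static const uint8_t        deferred_val = 1;

    // Returns true only the first time the card is marked, so the enqueue
    // below happens at most once per card.
    static bool mark_card_deferred(size_t card_index) {
      if (card_table[card_index] == deferred_val) return false;
      card_table[card_index] = deferred_val;
      return true;
    }

    static void deferred_rs_update(uintptr_t field_addr) {
      size_t card_index = field_addr >> card_shift;
      if (mark_card_deferred(card_index)) {
        dirty_card_queue.push(&card_table[card_index]);
      }
    }
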
1828 public:
1829   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1830 
1831   ~G1ParScanThreadState() {
1832     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1833   }
1834 


1836   ageTable*         age_table()       { return &_age_table;       }
1837 
1838   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1839     return _alloc_buffers[purpose];
1840   }
1841 
1842   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1843   size_t undo_waste() const                      { return _undo_waste; }
1844 
1845 #ifdef ASSERT
1846   bool verify_ref(narrowOop* ref) const;
1847   bool verify_ref(oop* ref) const;
1848   bool verify_task(StarTask ref) const;
1849 #endif // ASSERT
1850 
1851   template <class T> void push_on_queue(T* ref) {
1852     assert(verify_ref(ref), "sanity");
1853     refs()->push(ref);
1854   }
1855 
1856   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1857     if (G1DeferredRSUpdate) {
1858       deferred_rs_update(from, p, tid);
1859     } else {
1860       immediate_rs_update(from, p, tid);
1861     }
1862   }
1863 
1864   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1865     HeapWord* obj = NULL;
1866     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1867     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1868       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1869       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1870       alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1871 
1872       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1873       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1874       // Otherwise.
1875       alloc_buf->set_word_size(gclab_word_size);
1876       alloc_buf->set_buf(buf);
1877 
1878       obj = alloc_buf->allocate(word_sz);
1879       assert(obj != NULL, "buffer was definitely big enough...");
1880     } else {
1881       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1882     }
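The branch above retires and refills the PLAB only when the request is a small fraction of a fresh buffer (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct); larger requests go straight to par_allocate_during_gc() so one big object cannot waste most of a new buffer. A tiny worked example of the threshold, with a 4096-word PLAB and a waste percentage of 10 assumed rather than taken from the VM's flags:

    #include <cstddef>
    #include <cstdio>

    static bool refill_plab(size_t word_sz, size_t gclab_word_size,
                            size_t waste_pct) {
      return word_sz * 100 < gclab_word_size * waste_pct;
    }

    int main() {
      printf("%d\n", refill_plab(400,  4096, 10));  // 1: retire + refill
      printf("%d\n", refill_plab(1000, 4096, 10));  // 0: allocate directly
      return 0;
    }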


1966   // the work queue.
1967   inline bool has_partial_array_mask(narrowOop* ref) const {
1968     assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1969     return false;
1970   }
1971 
1972   // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1973   // We always encode partial arrays as regular oops, to allow the
1974   // specialization for has_partial_array_mask() for narrowOops above.
1975   // This means that unintentional use of this method with narrowOops is caught
1976   // by the compiler.
1977   inline oop* set_partial_array_mask(oop obj) const {
1978     assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
1979     return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
1980   }
1981 
1982   inline oop clear_partial_array_mask(oop* ref) const {
1983     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1984   }
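set_partial_array_mask()/clear_partial_array_mask() tag a to-be-rescanned objArray pointer by setting a low bit that aligned object addresses never carry, so a popped queue entry can be classified without any extra storage. A minimal sketch of the tagging, with the mask value assumed to be 0x2:

    #include <cassert>
    #include <cstdint>

    static const uintptr_t partial_array_mask = 0x2;  // assumed value

    inline void* set_partial_array_mask(void* obj) {
      assert(((uintptr_t)obj & partial_array_mask) == 0 && "information loss");
      return (void*)((uintptr_t)obj | partial_array_mask);
    }

    inline bool has_partial_array_mask(void* ref) {
      return ((uintptr_t)ref & partial_array_mask) != 0;
    }

    inline void* clear_partial_array_mask(void* ref) {
      return (void*)((uintptr_t)ref & ~partial_array_mask);
    }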
1985 
1986   void do_oop_partial_array(oop* p) {
1987     assert(has_partial_array_mask(p), "invariant");
1988     oop from_obj = clear_partial_array_mask(p);
1989 
1990     assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
1991     assert(from_obj->is_objArray(), "must be obj array");
1992     objArrayOop from_obj_array = objArrayOop(from_obj);
1993     // The from-space object contains the real length.
1994     int length                 = from_obj_array->length();
1995 
1996     assert(from_obj->is_forwarded(), "must be forwarded");
1997     oop to_obj                 = from_obj->forwardee();
1998     assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
1999     objArrayOop to_obj_array   = objArrayOop(to_obj);
2000     // We keep track of the next start index in the length field of the
2001     // to-space object.
2002     int next_index             = to_obj_array->length();
2003     assert(0 <= next_index && next_index < length,
2004            err_msg("invariant, next index: %d, length: %d", next_index, length));
2005 
2006     int start                  = next_index;
2007     int end                    = length;
2008     int remainder              = end - start;
2009     // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
2010     if (remainder > 2 * ParGCArrayScanChunk) {
2011       end = start + ParGCArrayScanChunk;
2012       to_obj_array->set_length(end);
2013       // Push the remainder before we process the range in case another
2014       // worker has run out of things to do and can steal it.
2015       oop* from_obj_p = set_partial_array_mask(from_obj);
2016       push_on_queue(from_obj_p);
2017     } else {
2018       assert(length == end, "sanity");
2019       // We'll process the final range for this object. Restore the length
2020       // so that the heap remains parsable in case of evacuation failure.
2021       to_obj_array->set_length(end);
2022     }
2023     _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
2024     // Process indexes [start,end). It will also process the header
2025     // along with the first chunk (i.e., the chunk with start == 0).
2026     // Note that at this point the length field of to_obj_array is not
2027     // correct given that we are using it to keep track of the next
2028     // start index. oop_iterate_range() (thankfully!) ignores the length
2029     // field and only relies on the start / end parameters.  It does
2030     // however return the size of the object which will be incorrect. So
2031     // we have to ignore it even if we wanted to use it.
2032     to_obj_array->oop_iterate_range(&_scanner, start, end);
2033   }
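The chunking above scans at most ParGCArrayScanChunk elements per pop and only re-pushes when more than two chunks remain, so a pushed remainder is always bigger than one chunk and the final piece handled in one go is at most two chunks. A small driver that prints the ranges this policy produces, with a hypothetical chunk size of 50:

    #include <cstdio>

    static const int chunk = 50;  // stand-in for ParGCArrayScanChunk

    void scan_in_chunks(int length) {
      int next_index = 0;  // what the to-space length field tracks
      while (next_index < length) {
        int start = next_index;
        int end   = length;
        if (end - start > 2 * chunk) {
          end = start + chunk;   // re-push the (still large) remainder
        }
        printf("[%d, %d)\n", start, end);
        next_index = end;
      }
    }

    int main() { scan_in_chunks(230); return 0; }  // [0,50) [50,100) [100,150) [150,230)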
2034 
2035   // This method is applied to the fields of the objects that have just been copied.
2036   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
2037     assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
2038            "Reference should not be NULL here as such are never pushed to the task queue.");
2039     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
2040 
2041     // Although we never intentionally push references outside of the collection
2042     // set, due to (benign) races in the claim mechanism during RSet scanning more
2043     // than one thread might claim the same card. So the same card may be
2044     // processed multiple times. So redo this check.
2045     if (_g1h->in_cset_fast_test(obj)) {
2046       oop forwardee;
2047       if (obj->is_forwarded()) {
2048         forwardee = obj->forwardee();
2049       } else {
2050         forwardee = copy_to_survivor_space(obj);
2051       }
2052       assert(forwardee != NULL, "forwardee should not be NULL");
2053       oopDesc::encode_store_heap_oop(p, forwardee);
2054     }
2055 
2056     assert(obj != NULL, "Must be");
2057     update_rs(from, p, queue_num());
2058   }
2059 public:
2060 
2061   oop copy_to_survivor_space(oop const obj);
2062 
2063   template <class T> void deal_with_reference(T* ref_to_scan) {
2064     if (!has_partial_array_mask(ref_to_scan)) {
2065       // Note: we can use "raw" versions of "region_containing" because
2066       // "obj_to_scan" is definitely in the heap, and is not in a
2067       // humongous region.
2068       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2069       do_oop_evac(ref_to_scan, r);
2070     } else {
2071       do_oop_partial_array((oop*)ref_to_scan);
2072     }
2073   }
2074 
2075   void deal_with_reference(StarTask ref) {
2076     assert(verify_task(ref), "sanity");
2077     if (ref.is_narrow()) {
2078       deal_with_reference((narrowOop*)ref);
2079     } else {
2080       deal_with_reference((oop*)ref);
2081     }
2082   }
2083 
2084 public:
2085   void trim_queue();
2086 };
2087 
2088 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP


 689   bool expand(size_t expand_bytes);
 690 
 691   // Do anything common to GCs.
 692   virtual void gc_prologue(bool full);
 693   virtual void gc_epilogue(bool full);
 694 
 695   // We register a region with the fast "in collection set" test. We
 696   // simply set to true the array slot corresponding to this region.
 697   void register_region_with_in_cset_fast_test(HeapRegion* r) {
 698     assert(_in_cset_fast_test_base != NULL, "sanity");
 699     assert(r->in_collection_set(), "invariant");
 700     uint index = r->hrs_index();
 701     assert(index < _in_cset_fast_test_length, "invariant");
 702     assert(!_in_cset_fast_test_base[index], "invariant");
 703     _in_cset_fast_test_base[index] = true;
 704   }
 705 
 706   // This is a fast test on whether a reference points into the
 707   // collection set or not. Assume that the reference
 708   // points into the heap.
 709   inline bool in_cset_fast_test(oop obj);
 710 
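Only the declaration remains in the header here; the body shown in the earlier listing presumably moves, per 8038498, into g1CollectedHeap.inline.hpp so that only callers needing the definition pay for the extra includes. Roughly (the exact include list of that file is not shown in this webrev):

    // g1CollectedHeap.inline.hpp (sketch)
    #include "gc_implementation/g1/g1CollectedHeap.hpp"
    #include "gc_implementation/g1/heapRegion.hpp"

    inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
      assert(_in_cset_fast_test != NULL, "sanity");
      assert(_g1_committed.contains((HeapWord*) obj),
             err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
      // no need to subtract the bottom of the heap from obj,
      // _in_cset_fast_test is biased
      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
      bool ret = _in_cset_fast_test[index];
      // let's make sure the result is consistent with what the slower
      // test returns
      assert( ret || !obj_in_cs(obj), "sanity");
      assert(!ret ||  obj_in_cs(obj), "sanity");
      return ret;
    }
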
 711   void clear_cset_fast_test() {
 712     assert(_in_cset_fast_test_base != NULL, "sanity");
 713     memset(_in_cset_fast_test_base, false,
 714            (size_t) _in_cset_fast_test_length * sizeof(bool));
 715   }
 716 
 717   // This is called at the start of either a concurrent cycle or a Full
 718   // GC to update the number of old marking cycles started.
 719   void increment_old_marking_cycles_started();
 720 
 721   // This is called at the end of either a concurrent cycle or a Full
 722   // GC to update the number of old marking cycles completed. Those two
 723   // can happen in a nested fashion, i.e., we start a concurrent
 724   // cycle, a Full GC happens half-way through it which ends first,
 725   // and then the cycle notices that a Full GC happened and ends
 726   // too. The concurrent parameter is a boolean to help us do a bit
 727   // tighter consistency checking in the method. If concurrent is
 728   // false, the caller is the inner caller in the nesting (i.e., the
 729   // Full GC). If concurrent is true, the caller is the outer caller


1221   // Wrapper for the region list operations that can be called from
1222   // methods outside this class.
1223 
1224   void secondary_free_list_add(FreeRegionList* list) {
1225     _secondary_free_list.add_ordered(list);
1226   }
1227 
1228   void append_secondary_free_list() {
1229     _free_list.add_ordered(&_secondary_free_list);
1230   }
1231 
1232   void append_secondary_free_list_if_not_empty_with_lock() {
1233     // If the secondary free list looks empty there's no reason to
1234     // take the lock and then try to append it.
1235     if (!_secondary_free_list.is_empty()) {
1236       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1237       append_secondary_free_list();
1238     }
1239   }
1240 
1241   inline void old_set_remove(HeapRegion* hr);


1242 
1243   size_t non_young_capacity_bytes() {
1244     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1245   }
1246 
1247   void set_free_regions_coming();
1248   void reset_free_regions_coming();
1249   bool free_regions_coming() { return _free_regions_coming; }
1250   void wait_while_free_regions_coming();
1251 
1252   // Determine whether the given region is one that we are using as an
1253   // old GC alloc region.
1254   bool is_old_gc_alloc_region(HeapRegion* hr) {
1255     return hr == _retained_old_gc_alloc_region;
1256   }
1257 
1258   // Perform a collection of the heap; intended for use in implementing
1259   // "System.gc".  This probably implies as full a collection as the
1260   // "CollectedHeap" supports.
1261   virtual void collect(GCCause::Cause cause);


1312   virtual void oop_iterate(ExtendedOopClosure* cl);
1313 
1314   // Same as above, restricted to a memory region.
1315   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1316 
1317   // Iterate over all objects, calling "cl.do_object" on each.
1318   virtual void object_iterate(ObjectClosure* cl);
1319 
1320   virtual void safe_object_iterate(ObjectClosure* cl) {
1321     object_iterate(cl);
1322   }
1323 
1324   // Iterate over all spaces in use in the heap, in ascending address order.
1325   virtual void space_iterate(SpaceClosure* cl);
1326 
1327   // Iterate over heap regions, in address order, terminating the
1328   // iteration early if the "doHeapRegion" method returns "true".
1329   void heap_region_iterate(HeapRegionClosure* blk) const;
1330 
1331   // Return the region with the given index. It assumes the index is valid.
1332   inline HeapRegion* region_at(uint index) const;
1333 
1334   // Divide the heap region sequence into "chunks" of some size (the number
1335   // of regions divided by the number of parallel threads times some
1336   // overpartition factor, currently 4).  Assumes that this will be called
1337   // in parallel by ParallelGCThreads worker threads with distinct worker
1338   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1339   // calls will use the same "claim_value", and that that claim value is
1340   // different from the claim_value of any heap region before the start of
1341   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1342   // attempting to claim the first region in each chunk, and, if
1343   // successful, applying the closure to each region in the chunk (and
1344   // setting the claim value of the second and subsequent regions of the
1345   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1346   // i.e., that a closure never attempt to abort a traversal.
1347   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1348                                        uint worker,
1349                                        uint no_of_par_workers,
1350                                        jint claim_value);
1351 
1352   // It resets all the region claim values to the default.


1441   // This permission only extends from the creation of a new object
1442   // via a TLAB up to the first subsequent safepoint. If such permission
1443   // is granted for this heap type, the compiler promises to call
1444   // defer_store_barrier() below on any slow path allocation of
1445   // a new object for which such initializing store barriers will
1446   // have been elided. G1, like CMS, allows this, but should be
1447   // ready to provide a compensating write barrier as necessary
1448   // if that storage came out of a non-young region. The efficiency
1449   // of this implementation depends crucially on being able to
1450   // answer very efficiently in constant time whether a piece of
1451   // storage in the heap comes from a young region or not.
1452   // See ReduceInitialCardMarks.
1453   virtual bool can_elide_tlab_store_barriers() const {
1454     return true;
1455   }
1456 
1457   virtual bool card_mark_must_follow_store() const {
1458     return true;
1459   }
1460 
1461   inline bool is_in_young(const oop obj);
1462 
1463 #ifdef ASSERT
1464   virtual bool is_in_partial_collection(const void* p);
1465 #endif
1466 
1467   virtual bool is_scavengable(const void* addr);
1468 
1469   // We don't need barriers for initializing stores to objects
1470   // in the young gen: for the SATB pre-barrier, there is no
1471   // pre-value that needs to be remembered; for the remembered-set
1472   // update logging post-barrier, we don't maintain remembered set
1473   // information for young gen objects.
1474   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);


1475 
1476   // Returns "true" iff the given word_size is "very large".
1477   static bool isHumongous(size_t word_size) {
1478     // Note this has to be strictly greater-than as the TLABs
1479     // are capped at the humongous threshold and we want to
1480     // ensure that we don't try to allocate a TLAB as
1481     // humongous and that we don't allocate a humongous
1482     // object in a TLAB.
1483     return word_size > _humongous_object_threshold_in_words;
1484   }
1485 
1486   // Update mod union table with the set of dirty cards.
1487   void updateModUnion();
1488 
1489   // Set the mod union bits corresponding to the given memRegion.  Note
1490   // that this is always a safe operation, since it doesn't clear any
1491   // bits.
1492   void markModUnionRange(MemRegion mr);
1493 
1494   // Records the fact that a marking phase is no longer in progress.


1548       !hr->obj_allocated_since_prev_marking(obj) &&
1549       !isMarkedPrev(obj);
1550   }
1551 
1552   // This function returns true when an object was allocated before the
1553   // start of the current marking (the "next" marking) and has not yet
1554   // been marked by it.
1555 
1556   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1557     return
1558       !hr->obj_allocated_since_next_marking(obj) &&
1559       !isMarkedNext(obj);
1560   }
1561 
1562   // Determine if an object is dead, given only the object itself.
1563   // This will find the region to which the object belongs and
1564   // then call the region version of the same function.
1565 
1566   // Note: if the object is NULL it is not considered dead.
1567 
1568   inline bool is_obj_dead(const oop obj) const;
1569 
1570   inline bool is_obj_ill(const oop obj) const;
1571 
1572   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1573   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1574   bool is_marked(oop obj, VerifyOption vo);
1575   const char* top_at_mark_start_str(VerifyOption vo);
1576 
1577   ConcurrentMark* concurrent_mark() const { return _cm; }
1578 
1579   // Refinement
1580 
1581   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1582 
1583   // The dirty cards region list is used to record a subset of regions
1584   // whose cards need clearing. The list is populated during the
1585   // remembered set scanning and drained during the card table
1586   // cleanup. Although the methods are reentrant, population/draining
1587   // phases must not overlap. For synchronization purposes the last
1588   // element on the list points to itself.
1589   HeapRegion* _dirty_cards_region_list;
1590   void push_dirty_cards_region(HeapRegion* hr);


1644   // consistent most of the time, so most calls to this should use
1645   // vo == UsePrevMarking.
1646   // Currently, there is only one case where this is called with
1647   // vo == UseNextMarking, which is to verify the "next" marking
1648   // information at the end of remark.
1649   // Currently there is only one place where this is called with
1650   // vo == UseMarkWord, which is to verify the marking during a
1651   // full GC.
1652   void verify(bool silent, VerifyOption vo);
1653 
1654   // Override; it uses the "prev" marking information
1655   virtual void verify(bool silent);
1656 
1657   // The methods below are here for convenience and dispatch the
1658   // appropriate method depending on value of the given VerifyOption
1659   // parameter. The values for that parameter, and their meanings,
1660   // are the same as those above.
1661 
1662   bool is_obj_dead_cond(const oop obj,
1663                         const HeapRegion* hr,
1664                         const VerifyOption vo) const;
1665 
1666   bool is_obj_dead_cond(const oop obj,
1667                         const VerifyOption vo) const;
1668 
1669   // Printing
1670 
1671   virtual void print_on(outputStream* st) const;
1672   virtual void print_extended_on(outputStream* st) const;
1673   virtual void print_on_error(outputStream* st) const;
1674 
1675   virtual void print_gc_threads_on(outputStream* st) const;
1676   virtual void gc_threads_do(ThreadClosure* tc) const;
1677 
1678   // Override
1679   void print_tracing_info() const;
1680 
1681   // The following two methods are helpful for debugging RSet issues.
1682   void print_cset_rsets() PRODUCT_RETURN;
1683   void print_all_rsets() PRODUCT_RETURN;
1684 
1685 public:
1686   void stop_conc_gc_threads();
1687 


1741   double _start_strong_roots;
1742   double _strong_roots_time;
1743   double _start_term;
1744   double _term_time;
1745 
1746   // Map from young-age-index (0 == not young, 1 is youngest) to
1747   // surviving words. base is what we get back from the malloc call
1748   size_t* _surviving_young_words_base;
1749   // this points into the array, as we use the first few entries for padding
1750   size_t* _surviving_young_words;
1751 
1752 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1753 
1754   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1755 
1756   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
1757 
1758   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
1759   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
1760 
1761   template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1762 
1763   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1764     // If the new value of the field points to the same region or
1765     // is the to-space, we don't need to include it in the Rset updates.
1766     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1767       size_t card_index = ctbs()->index_for(p);
1768       // If the card hasn't been added to the buffer, do it.
1769       if (ctbs()->mark_card_deferred(card_index)) {
1770         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1771       }
1772     }
1773   }
1774 
1775 public:
1776   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1777 
1778   ~G1ParScanThreadState() {
1779     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1780   }
1781 


1783   ageTable*         age_table()       { return &_age_table;       }
1784 
1785   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1786     return _alloc_buffers[purpose];
1787   }
1788 
1789   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1790   size_t undo_waste() const                      { return _undo_waste; }
1791 
1792 #ifdef ASSERT
1793   bool verify_ref(narrowOop* ref) const;
1794   bool verify_ref(oop* ref) const;
1795   bool verify_task(StarTask ref) const;
1796 #endif // ASSERT
1797 
1798   template <class T> void push_on_queue(T* ref) {
1799     assert(verify_ref(ref), "sanity");
1800     refs()->push(ref);
1801   }
1802 
1803   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
1804 
1805   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1806     HeapWord* obj = NULL;
1807     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1808     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1809       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1810       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1811       alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1812 
1813       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1814       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1815       // Otherwise.
1816       alloc_buf->set_word_size(gclab_word_size);
1817       alloc_buf->set_buf(buf);
1818 
1819       obj = alloc_buf->allocate(word_sz);
1820       assert(obj != NULL, "buffer was definitely big enough...");
1821     } else {
1822       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1823     }


1907   // the work queue.
1908   inline bool has_partial_array_mask(narrowOop* ref) const {
1909     assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1910     return false;
1911   }
1912 
1913   // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1914   // We always encode partial arrays as regular oop, to allow the
1915   // specialization for has_partial_array_mask() for narrowOops above.
1916   // This means that unintentional use of this method with narrowOops are caught
1917   // by the compiler.
1918   inline oop* set_partial_array_mask(oop obj) const {
1919     assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
1920     return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
1921   }
1922 
1923   inline oop clear_partial_array_mask(oop* ref) const {
1924     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1925   }
1926 
1927   inline void do_oop_partial_array(oop* p);
1928 
1929   // This method is applied to the fields of the objects that have just been copied.
1930   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
1931     assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
1932            "Reference should not be NULL here as such are never pushed to the task queue.");
1933     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1934 
1935     // Although we never intentionally push references outside of the collection
1936     // set, due to (benign) races in the claim mechanism during RSet scanning more
1937     // than one thread might claim the same card. So the same card may be
1938     // processed multiple times. So redo this check.
1939     if (_g1h->in_cset_fast_test(obj)) {
1940       oop forwardee;
1941       if (obj->is_forwarded()) {
1942         forwardee = obj->forwardee();
1943       } else {
1944         forwardee = copy_to_survivor_space(obj);
1945       }
1946       assert(forwardee != NULL, "forwardee should not be NULL");
1947       oopDesc::encode_store_heap_oop(p, forwardee);
1948     }
1949 
1950     assert(obj != NULL, "Must be");
1951     update_rs(from, p, queue_num());
1952   }
1953 public:
1954 
1955   oop copy_to_survivor_space(oop const obj);
1956 
1957   template <class T> inline void deal_with_reference(T* ref_to_scan);
1958 
1959   inline void deal_with_reference(StarTask ref);
1960 
1961 public:
1962   void trim_queue();
1963 };
1964 
1965 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP