src/share/vm/gc_implementation/g1/g1RemSet.hpp

rev 4561 : 7176479: G1: JVM crashes on T5-8 system with 1.5 TB heap
Summary: Refactor G1's hot card cache and card counts table into their own files. Simplify the card counts table, including removing the encoding of the card index in each entry. The card counts table now has a 1:1 correspondence with the cards spanned by the heap. Space for the card counts table is reserved from virtual memory (rather than the C heap) during JVM startup and is committed/expanded when the heap is expanded. Changes were also reviewed by Vitaly Davidovich.
Reviewed-by:
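
For context, here is a minimal standalone sketch of the scheme the summary describes: one count entry per card spanned by the heap, with the table's address space reserved up front and committed as the heap grows. This is not the actual G1CardCounts code; the 512-byte card size, the 1 GB maximum heap, and the mmap/mprotect reserve-and-commit calls are illustrative assumptions.

// Illustrative sketch only (not HotSpot code): a counts table with a 1:1
// mapping to cards, reserved from virtual memory and committed lazily.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kCardShift   = 9;            // assumed 512-byte cards
static const size_t kMaxHeapSize = 1UL << 30;    // assumed 1 GB maximum heap

int main() {
  const size_t max_cards = kMaxHeapSize >> kCardShift;

  // "Reserve" address space for one byte-sized count per possible card;
  // no physical memory is committed yet (analogous to reserving from
  // virtual memory at JVM startup instead of allocating from the C heap).
  void* raw = mmap(NULL, max_cards, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(raw != MAP_FAILED);
  uint8_t* counts = static_cast<uint8_t*>(raw);

  // "Commit" only the prefix covering the currently committed heap; this
  // step would be repeated each time the heap is expanded.
  size_t committed_heap  = 64UL * 1024 * 1024;   // pretend 64 MB is committed
  size_t committed_cards = committed_heap >> kCardShift;
  int rc = mprotect(counts, committed_cards, PROT_READ | PROT_WRITE);
  assert(rc == 0);

  // With a 1:1 table no card index needs to be encoded in the entry:
  // the index is simply (card_address - heap_base) >> card_shift.
  uintptr_t heap_base = 0x40000000UL;            // made-up heap base
  uintptr_t some_addr = heap_base + 123456;
  size_t card_index = (some_addr - heap_base) >> kCardShift;
  counts[card_index]++;                          // bump the card's refinement count

  printf("card %zu has count %u\n", card_index, (unsigned) counts[card_index]);
  munmap(counts, max_cards);
  return 0;
}
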
   1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  49     DoDirtySync          = 2,
  50     LastSync             = 3,
  51 
  52     SeqTask              = 0,
  53     NumSeqTasks          = 1
  54   };
  55 
  56   CardTableModRefBS*     _ct_bs;
  57   SubTasksDone*          _seq_task;
  58   G1CollectorPolicy*     _g1p;
  59 
  60   ConcurrentG1Refine*    _cg1r;
  61 
  62   size_t*                _cards_scanned;
  63   size_t                 _total_cards_scanned;
  64 
  65   // Used for caching the closure that is responsible for scanning
  66   // references into the collection set.
  67   OopsInHeapRegionClosure** _cset_rs_update_cl;
  68 
  69   // The routine that performs the actual work of refining a dirty
  70   // card.
   71   // If check_for_refs_into_cset is true then a true result is returned
  72   // if the card contains oops that have references into the current
  73   // collection set.
  74   bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
  75                                     bool check_for_refs_into_cset);
  76 
  77 public:
  78   // This is called to reset dual hash tables after the gc pause
  79   // is finished and the initial hash table is no longer being
  80   // scanned.
  81   void cleanupHRRS();
  82 
  83   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  84   ~G1RemSet();
  85 
  86   // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  87   // outside the CS (having invoked "blk->set_region" to set the "from"
  88   // region correctly beforehand.) The "worker_i" param is for the
  89   // parallel case where the number of the worker thread calling this
  90   // function can be helpful in partitioning the work to be done. It
  91   // should be the same as the "i" passed to the calling thread's
   92   // work(i) function. In the sequential case this param will be ignored.
  93   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
  94                                    int worker_i);
  95 
  96   // Prepare for and cleanup after an oops_into_collection_set_do
  97   // call.  Must call each of these once before and after (in sequential
  98   // code) any threads call oops_into_collection_set_do.  (This offers an
   99   // opportunity for sequential setup and teardown of structures needed by a
 100   // parallel iteration over the CS's RS.)
 101   void prepare_for_oops_into_collection_set_do();
 102   void cleanup_after_oops_into_collection_set_do();
 103 
 104   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
 105   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
 106 
 107   CardTableModRefBS* ct_bs() { return _ct_bs; }
 108   size_t cardsScanned() { return _total_cards_scanned; }
 109 
 110   // Record, if necessary, the fact that *p (where "p" is in region "from",
 111   // which is required to be non-NULL) has changed to a new non-NULL value.
 112   template <class T> void write_ref(HeapRegion* from, T* p);
 113   template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
 114 
 115   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
 116   // or card, respectively, such that a region or card with a corresponding
 117   // 0 bit contains no part of any live object.  Eliminates any remembered
 118   // set entries that correspond to dead heap ranges.
 119   void scrub(BitMap* region_bm, BitMap* card_bm);
 120 
  121   // Like the above, but assumes it is called in parallel: "worker_num" is the
 122   // parallel thread id of the current thread, and "claim_val" is the
 123   // value that should be used to claim heap regions.
 124   void scrub_par(BitMap* region_bm, BitMap* card_bm,
 125                  uint worker_num, int claim_val);
 126 
 127   // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
 128   // join and leave around parts that must be atomic wrt GC.  (NULL means
 129   // being done at a safepoint.)
 130   // If check_for_refs_into_cset is true, a true result is returned
 131   // if the given card contains oops that have references into the
 132   // current collection set.
 133   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
 134                                        bool check_for_refs_into_cset);
 135 
 136   // Print any relevant summary info.
 137   virtual void print_summary_info();
 138 
 139   // Prepare remembered set for verification.
 140   virtual void prepare_for_verify();
 141 };
 142 
 143 class CountNonCleanMemRegionClosure: public MemRegionClosure {
 144   G1CollectedHeap* _g1;
 145   int _n;
 146   HeapWord* _start_first;
 147 public:
 148   CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
 149     _g1(g1), _n(0), _start_first(NULL)
 150   {}
 151   void do_MemRegion(MemRegion mr);
 152   int n() { return _n; };
 153   HeapWord* start_first() { return _start_first; }

--- old/src/share/vm/gc_implementation/g1/g1RemSet.hpp (listing above)
+++ new/src/share/vm/gc_implementation/g1/g1RemSet.hpp (listing below)

   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  49     DoDirtySync          = 2,
  50     LastSync             = 3,
  51 
  52     SeqTask              = 0,
  53     NumSeqTasks          = 1
  54   };
  55 
  56   CardTableModRefBS*     _ct_bs;
  57   SubTasksDone*          _seq_task;
  58   G1CollectorPolicy*     _g1p;
  59 
  60   ConcurrentG1Refine*    _cg1r;
  61 
  62   size_t*                _cards_scanned;
  63   size_t                 _total_cards_scanned;
  64 
  65   // Used for caching the closure that is responsible for scanning
  66   // references into the collection set.
  67   OopsInHeapRegionClosure** _cset_rs_update_cl;
  68 
  69 public:
  70   // This is called to reset dual hash tables after the gc pause
  71   // is finished and the initial hash table is no longer being
  72   // scanned.
  73   void cleanupHRRS();
  74 
  75   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  76   ~G1RemSet();
  77 
  78   // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  79   // outside the CS (having invoked "blk->set_region" to set the "from"
  80   // region correctly beforehand.) The "worker_i" param is for the
  81   // parallel case where the number of the worker thread calling this
  82   // function can be helpful in partitioning the work to be done. It
  83   // should be the same as the "i" passed to the calling thread's
   84   // work(i) function. In the sequential case this param will be ignored.
  85   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
  86 
  87   // Prepare for and cleanup after an oops_into_collection_set_do
  88   // call.  Must call each of these once before and after (in sequential
  89   // code) any threads call oops_into_collection_set_do.  (This offers an
   90   // opportunity for sequential setup and teardown of structures needed by a
  91   // parallel iteration over the CS's RS.)
  92   void prepare_for_oops_into_collection_set_do();
  93   void cleanup_after_oops_into_collection_set_do();
  94 
  95   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  96   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
  97 
  98   CardTableModRefBS* ct_bs() { return _ct_bs; }
  99   size_t cardsScanned() { return _total_cards_scanned; }
 100 
 101   // Record, if necessary, the fact that *p (where "p" is in region "from",
 102   // which is required to be non-NULL) has changed to a new non-NULL value.
 103   template <class T> void write_ref(HeapRegion* from, T* p);
 104   template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
 105 
 106   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
 107   // or card, respectively, such that a region or card with a corresponding
 108   // 0 bit contains no part of any live object.  Eliminates any remembered
 109   // set entries that correspond to dead heap ranges.
 110   void scrub(BitMap* region_bm, BitMap* card_bm);
 111 
  112   // Like the above, but assumes it is called in parallel: "worker_num" is the
 113   // parallel thread id of the current thread, and "claim_val" is the
 114   // value that should be used to claim heap regions.
 115   void scrub_par(BitMap* region_bm, BitMap* card_bm,
 116                  uint worker_num, int claim_val);
 117 
 118   // Refine the card corresponding to "card_ptr".
 119   // If check_for_refs_into_cset is true, a true result is returned
 120   // if the given card contains oops that have references into the
 121   // current collection set.
 122   virtual bool refine_card(jbyte* card_ptr,
 123                            int worker_i,
 124                            bool check_for_refs_into_cset);
 125 
 126   // Print any relevant summary info.
 127   virtual void print_summary_info();
 128 
 129   // Prepare remembered set for verification.
 130   virtual void prepare_for_verify();
 131 };
 132 
 133 class CountNonCleanMemRegionClosure: public MemRegionClosure {
 134   G1CollectedHeap* _g1;
 135   int _n;
 136   HeapWord* _start_first;
 137 public:
 138   CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
 139     _g1(g1), _n(0), _start_first(NULL)
 140   {}
 141   void do_MemRegion(MemRegion mr);
 142   int n() { return _n; };
 143   HeapWord* start_first() { return _start_first; }
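
The comments above on prepare_for_oops_into_collection_set_do / cleanup_after_oops_into_collection_set_do describe a bracketed calling protocol: sequential setup once, then each worker calls oops_into_collection_set_do with its own worker id, then sequential teardown once. The sketch below only illustrates that ordering; the types are stand-in stubs, not the real HotSpot classes.

// Sketch of the calling protocol described in the comments above; the
// stub types below are placeholders, only the call ordering is the point.
#include <cstdio>
#include <thread>
#include <vector>

struct OopsInHeapRegionClosure { };    // stand-in for the real closure type

struct StubRemSet {                    // stand-in, not the real G1RemSet
  void prepare_for_oops_into_collection_set_do() {
    std::puts("prepare (sequential, before any worker runs)");
  }
  void oops_into_collection_set_do(OopsInHeapRegionClosure*, int worker_i) {
    std::printf("worker %d scans remembered sets into the collection set\n", worker_i);
  }
  void cleanup_after_oops_into_collection_set_do() {
    std::puts("cleanup (sequential, after all workers are done)");
  }
};

int main() {
  StubRemSet rs;
  OopsInHeapRegionClosure cl;
  const int num_workers = 4;

  // Sequential setup, called exactly once before the parallel phase.
  rs.prepare_for_oops_into_collection_set_do();

  // Parallel phase: each worker passes its own id, the same "i" it would
  // receive from its work(i) entry point.
  std::vector<std::thread> workers;
  for (int i = 0; i < num_workers; i++) {
    workers.emplace_back([&rs, &cl, i] { rs.oops_into_collection_set_do(&cl, i); });
  }
  for (std::thread& t : workers) {
    t.join();
  }

  // Sequential teardown, called exactly once after the parallel phase.
  rs.cleanup_after_oops_into_collection_set_do();
  return 0;
}
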