
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 11747 : [mq]: per.hotspot.patch


1256   // This permission only extends from the creation of a new object
1257   // via a TLAB up to the first subsequent safepoint. If such permission
1258   // is granted for this heap type, the compiler promises to call
1259   // defer_store_barrier() below on any slow path allocation of
1260   // a new object for which such initializing store barriers will
1261   // have been elided. G1, like CMS, allows this, but should be
1262   // ready to provide a compensating write barrier as necessary
1263   // if that storage came out of a non-young region. The efficiency
1264   // of this implementation depends crucially on being able to
1265   // answer very efficiently in constant time whether a piece of
1266   // storage in the heap comes from a young region or not.
1267   // See ReduceInitialCardMarks.
1268   virtual bool can_elide_tlab_store_barriers() const {
1269     return true;
1270   }
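
The contract above can be pictured with a small standalone model, shown below. All names and types are hypothetical stand-ins, not the real HotSpot API: the slow-path allocator records objects whose initializing card marks were elided, and before the next safepoint the heap issues a compensating card mark only for storage that did not come from a young region.

  // Hypothetical, self-contained model of the deferred card-mark contract;
  // none of these names are HotSpot's.
  #include <vector>

  struct ToyHeap {
    const char* young_lo;                 // assumed contiguous young range
    const char* young_hi;
    std::vector<const void*> deferred;    // slow-path objects with elided marks

    // Constant-time "is this address young?" test -- the property the comment
    // says the real implementation depends on.
    bool is_in_young(const void* p) const {
      return p >= static_cast<const void*>(young_lo) &&
             p <  static_cast<const void*>(young_hi);
    }

    // Compensating post-barrier for storage outside the young generation.
    void dirty_card_for(const void* /*p*/) { /* mark the covering card dirty */ }

    // The compiler's promise: slow-path allocations whose initializing store
    // barriers were elided are reported here.
    void defer_store_barrier(const void* new_obj) { deferred.push_back(new_obj); }

    // Settled before the next safepoint: only non-young storage needs a mark.
    void flush_deferred_card_marks() {
      for (const void* p : deferred) {
        if (!is_in_young(p)) dirty_card_for(p);
      }
      deferred.clear();
    }
  };
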
1271 
1272   virtual bool card_mark_must_follow_store() const {
1273     return true;
1274   }
1275 
1276   // The reference pending list lock is acquired from the
1277   // ConcurrentMarkThread.
1278   virtual bool needs_reference_pending_list_locker_thread() const {
1279     return true;
1280   }
1281 
1282   inline bool is_in_young(const oop obj);
1283 
1284   virtual bool is_scavengable(const void* addr);
1285 
1286   // We don't need barriers for initializing stores to objects
1287   // in the young gen: for the SATB pre-barrier, there is no
1288   // pre-value that needs to be remembered; for the remembered-set
1289   // update logging post-barrier, we don't maintain remembered set
1290   // information for young gen objects.
1291   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
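
The inline body lives in g1CollectedHeap.inline.hpp; a sketch consistent with the comment above simply delegates to the young-region test (the actual implementation may carry additional checks):

  // Sketch only, per the comment above: initializing store barriers can be
  // elided exactly when the new object sits in a young region.
  inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }
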
1292 
1293   // Returns "true" iff the given word_size is "very large".
1294   static bool is_humongous(size_t word_size) {
1295     // Note this has to be strictly greater-than as the TLABs
1296     // are capped at the humongous threshold and we want to
1297     // ensure that we don't try to allocate a TLAB as
1298     // humongous and that we don't allocate a humongous
1299     // object in a TLAB.
1300     return word_size > _humongous_object_threshold_in_words;
1301   }
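
A tiny standalone check of the strict greater-than boundary described in the comment (the threshold value here is made up; in G1 it is tied to the region size, which this sketch does not assume):

  // Standalone illustration of the ">" vs ">=" boundary; not HotSpot code.
  #include <cassert>
  #include <cstddef>

  static const size_t threshold_in_words = 1024;  // stand-in for _humongous_object_threshold_in_words

  static bool is_humongous_toy(size_t word_size) {
    return word_size > threshold_in_words;
  }

  int main() {
    assert(!is_humongous_toy(threshold_in_words));     // a maximally sized TLAB is not humongous
    assert(is_humongous_toy(threshold_in_words + 1));  // anything larger takes the humongous path
    return 0;
  }
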




1256   // This permission only extends from the creation of a new object
1257   // via a TLAB up to the first subsequent safepoint. If such permission
1258   // is granted for this heap type, the compiler promises to call
1259   // defer_store_barrier() below on any slow path allocation of
1260   // a new object for which such initializing store barriers will
1261   // have been elided. G1, like CMS, allows this, but should be
1262   // ready to provide a compensating write barrier as necessary
1263   // if that storage came out of a non-young region. The efficiency
1264   // of this implementation depends crucially on being able to
1265   // answer very efficiently in constant time whether a piece of
1266   // storage in the heap comes from a young region or not.
1267   // See ReduceInitialCardMarks.
1268   virtual bool can_elide_tlab_store_barriers() const {
1269     return true;
1270   }
1271 
1272   virtual bool card_mark_must_follow_store() const {
1273     return true;
1274   }
1275 






1276   inline bool is_in_young(const oop obj);
1277 
1278   virtual bool is_scavengable(const void* addr);
1279 
1280   // We don't need barriers for initializing stores to objects
1281   // in the young gen: for the SATB pre-barrier, there is no
1282   // pre-value that needs to be remembered; for the remembered-set
1283   // update logging post-barrier, we don't maintain remembered set
1284   // information for young gen objects.
1285   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1286 
1287   // Returns "true" iff the given word_size is "very large".
1288   static bool is_humongous(size_t word_size) {
1289     // Note this has to be strictly greater-than as the TLABs
1290     // are capped at the humongous threshold and we want to
1291     // ensure that we don't try to allocate a TLAB as
1292     // humongous and that we don't allocate a humongous
1293     // object in a TLAB.
1294     return word_size > _humongous_object_threshold_in_words;
1295   }

