// See CollectedHeap for semantics.

// TLAB (thread-local allocation buffer) support — overrides of the
// CollectedHeap interface.
virtual bool supports_tlab_allocation() const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1325
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint. If such permission
// is granted for this heap type, the compiler promises to call
// defer_store_barrier() below on any slow path allocation of
// a new object for which such initializing store barriers will
// have been elided. G1, like CMS, allows this, but should be
// ready to provide a compensating write barrier as necessary
// if that storage came out of a non-young region. The efficiency
// of this implementation depends crucially on being able to
// answer very efficiently in constant time whether a piece of
// storage in the heap comes from a young region or not.
// See ReduceInitialCardMarks.
virtual bool can_elide_tlab_store_barriers() const {
  // 6920090 (JDK bug id): Temporarily disabled, because of lingering
  // instabilities related to RICM with G1. In the
  // interim, the option ReduceInitialCardMarksForG1
  // below is left solely as a debugging device at least
  // until 6920109 fixes the instabilities.
  return ReduceInitialCardMarksForG1;
}
1347
// G1 unconditionally requires that the card mark for a store be
// ordered after the store itself — hence always true here.
virtual bool card_mark_must_follow_store() const {
  return true;
}
1351
1352 bool is_in_young(const oop obj) {
1353 HeapRegion* hr = heap_region_containing(obj);
1354 return hr != NULL && hr->is_young();
1355 }
1356
#ifdef ASSERT
// Debug-only query; compiled in only when ASSERT is defined.
virtual bool is_in_partial_collection(const void* p);
#endif

// See CollectedHeap for the contract.
virtual bool is_scavengable(const void* addr);
1362
1363 // We don't need barriers for initializing stores to objects
1364 // in the young gen: for the SATB pre-barrier, there is no
1365 // pre-value that needs to be remembered; for the remembered-set
1366 // update logging post-barrier, we don't maintain remembered set
1367 // information for young gen objects.
1368 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1369 // Re 6920090, 6920109 above.
1370 assert(ReduceInitialCardMarksForG1, "Else cannot be here");
1371 return is_in_young(new_obj);
1372 }
1373
// Can a compiler elide a store barrier when it writes
// a permanent oop into the heap? Applies when the compiler
// is storing x to the heap, where x->is_perm() is true.
virtual bool can_elide_permanent_oop_store_barriers() const {
  // At least until perm gen collection is also G1-ified, at
  // which point this should return false. Perm-gen oops are not
  // collected by G1, so no post-barrier is needed for storing them.
  return true;
}
1382
1383 // Returns "true" iff the given word_size is "very large".
1384 static bool isHumongous(size_t word_size) {
1385 // Note this has to be strictly greater-than as the TLABs
1386 // are capped at the humongous threshold and we want to
1387 // ensure that we don't try to allocate a TLAB as
1388 // humongous and that we don't allocate a humongous
1389 // object in a TLAB.
1390 return word_size > _humongous_object_threshold_in_words;
|
// See CollectedHeap for semantics.

// TLAB (thread-local allocation buffer) support — overrides of the
// CollectedHeap interface.
virtual bool supports_tlab_allocation() const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1325
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint. If such permission
// is granted for this heap type, the compiler promises to call
// defer_store_barrier() below on any slow path allocation of
// a new object for which such initializing store barriers will
// have been elided. G1, like CMS, allows this, but should be
// ready to provide a compensating write barrier as necessary
// if that storage came out of a non-young region. The efficiency
// of this implementation depends crucially on being able to
// answer very efficiently in constant time whether a piece of
// storage in the heap comes from a young region or not.
// See ReduceInitialCardMarks.
virtual bool can_elide_tlab_store_barriers() const {
  // Unconditionally allowed for G1; compensating barriers are
  // applied on slow-path allocations as described above.
  return true;
}
1342
// G1 unconditionally requires that the card mark for a store be
// ordered after the store itself — hence always true here.
virtual bool card_mark_must_follow_store() const {
  return true;
}
1346
1347 bool is_in_young(const oop obj) {
1348 HeapRegion* hr = heap_region_containing(obj);
1349 return hr != NULL && hr->is_young();
1350 }
1351
#ifdef ASSERT
// Debug-only query; compiled in only when ASSERT is defined.
virtual bool is_in_partial_collection(const void* p);
#endif

// See CollectedHeap for the contract.
virtual bool is_scavengable(const void* addr);
1357
1358 // We don't need barriers for initializing stores to objects
1359 // in the young gen: for the SATB pre-barrier, there is no
1360 // pre-value that needs to be remembered; for the remembered-set
1361 // update logging post-barrier, we don't maintain remembered set
1362 // information for young gen objects.
1363 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1364 return is_in_young(new_obj);
1365 }
1366
// Can a compiler elide a store barrier when it writes
// a permanent oop into the heap? Applies when the compiler
// is storing x to the heap, where x->is_perm() is true.
virtual bool can_elide_permanent_oop_store_barriers() const {
  // At least until perm gen collection is also G1-ified, at
  // which point this should return false. Perm-gen oops are not
  // collected by G1, so no post-barrier is needed for storing them.
  return true;
}
1375
1376 // Returns "true" iff the given word_size is "very large".
1377 static bool isHumongous(size_t word_size) {
1378 // Note this has to be strictly greater-than as the TLABs
1379 // are capped at the humongous threshold and we want to
1380 // ensure that we don't try to allocate a TLAB as
1381 // humongous and that we don't allocate a humongous
1382 // object in a TLAB.
1383 return word_size > _humongous_object_threshold_in_words;
|