< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 56145 : 8230431: Move G1 trace code from gcTrace* to G1 directory
Reviewed-by:
rev 56146 : 8209802: Garbage collectors should register JFR types themselves to avoid build errors.


1550   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1551 
1552   // Override the default _filler_array_max_size so that no humongous filler
1553   // objects are created.
1554   _filler_array_max_size = _humongous_object_threshold_in_words;
1555 
1556   uint n_queues = ParallelGCThreads;
1557   _task_queues = new RefToScanQueueSet(n_queues);
1558 
1559   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1560 
1561   for (uint i = 0; i < n_queues; i++) {
1562     RefToScanQueue* q = new RefToScanQueue();
1563     q->initialize();
1564     _task_queues->register_queue(i, q);
1565     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1566   }
1567 
1568   // Initialize the G1EvacuationFailureALot counters and flags.
1569   NOT_PRODUCT(reset_evacuation_should_fail();)

1570 
1571   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1572 }
1573 
1574 static size_t actual_reserved_page_size(ReservedSpace rs) {
1575   size_t page_size = os::vm_page_size();
1576   if (UseLargePages) {
1577     // There are two ways to manage large page memory.
1578     // 1. OS supports committing large page memory.
1579     // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
1580     //    And ReservedSpace calls it 'special'. If we failed to set 'special',
1581     //    we reserved the memory without large pages.
1582     if (os::can_commit_large_page_memory() || rs.special()) {
1584       // The alignment of the ReservedSpace comes from the preferred page size or
1585       // the heap alignment; if it came from the heap alignment, it could be larger
1586       // than the large page size, so cap it at the large page size.
1586       page_size = MIN2(rs.alignment(), os::large_page_size());
1587     }
1588   }
1589 




1550   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1551 
1552   // Override the default _filler_array_max_size so that no humongous filler
1553   // objects are created.
1554   _filler_array_max_size = _humongous_object_threshold_in_words;
1555 
1556   uint n_queues = ParallelGCThreads;
1557   _task_queues = new RefToScanQueueSet(n_queues);
1558 
1559   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1560 
1561   for (uint i = 0; i < n_queues; i++) {
1562     RefToScanQueue* q = new RefToScanQueue();
1563     q->initialize();
1564     _task_queues->register_queue(i, q);
1565     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1566   }
1567 
1568   // Initialize the G1EvacuationFailureALot counters and flags.
1569   NOT_PRODUCT(reset_evacuation_should_fail();)
1570   _gc_tracer_stw->initialize();
1571 
1572   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1573 }
1574 
1575 static size_t actual_reserved_page_size(ReservedSpace rs) {
1576   size_t page_size = os::vm_page_size();
1577   if (UseLargePages) {
1578     // There are two ways to manage large page memory.
1579     // 1. OS supports committing large page memory.
1580     // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
1581     //    And ReservedSpace calls it 'special'. If we failed to set 'special',
1582     //    we reserved the memory without large pages.
1583     if (os::can_commit_large_page_memory() || rs.special()) {
1585       // The alignment of the ReservedSpace comes from the preferred page size or
1586       // the heap alignment; if it came from the heap alignment, it could be larger
1587       // than the large page size, so cap it at the large page size.
1587       page_size = MIN2(rs.alignment(), os::large_page_size());
1588     }
1589   }
1590 


< prev index next >