src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

Old:
2000 }
2001 
2002 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2003                                                                             \
2004 void CompactibleFreeListSpace::                                             \
2005 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2006   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2007          "Shouldn't be called (yet) during parallel part of gc.");          \
2008   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2009   /*                                                                        \
2010    * This also restores any displaced headers and removes the elements from \
2011    * the iteration set as they are processed, so that we have a clean slate \
2012    * at the end of the iteration. Note, thus, that if new objects are       \
2013    * promoted as a result of the iteration they are iterated over as well.  \
2014    */                                                                       \
2015   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2016 }
2017 
2018 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2019 
2020 
2021 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2022   // ugghh... how would one do this efficiently for a non-contiguous space?
2023   guarantee(false, "NYI");
2024 }
2025 
2026 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2027   return _smallLinearAllocBlock._word_size == 0;
2028 }
2029 
2030 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2031   // Fix up linear allocation blocks to look like free blocks
2032   repairLinearAllocBlock(&_smallLinearAllocBlock);
2033 }
2034 
2035 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2036   assert_locked();
2037   if (blk->_ptr != NULL) {
2038     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2039            "Minimum block size requirement");
2040     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2041     fc->set_size(blk->_word_size);
2042     fc->link_prev(NULL);   // mark as free
2043     fc->dontCoalesce();
2044     assert(fc->is_free(), "just marked it free");
2045     assert(fc->cantCoalesce(), "just marked it uncoalescable");

New:

2000 }
2001 
2002 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2003                                                                             \
2004 void CompactibleFreeListSpace::                                             \
2005 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2006   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2007          "Shouldn't be called (yet) during parallel part of gc.");          \
2008   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2009   /*                                                                        \
2010    * This also restores any displaced headers and removes the elements from \
2011    * the iteration set as they are processed, so that we have a clean slate \
2012    * at the end of the iteration. Note, thus, that if new objects are       \
2013    * promoted as a result of the iteration they are iterated over as well.  \
2014    */                                                                       \
2015   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2016 }
2017 
2018 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2019 
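
The block above is an X-macro pattern: ALL_SINCE_SAVE_MARKS_CLOSURES applies CFLS_OOP_SINCE_SAVE_MARKS_DEFN to every closure type it enumerates, stamping out one oop_since_save_marks_iterate##nv_suffix function per closure (the "nv" suffix marks HotSpot's statically dispatched, non-virtual closure variants). A minimal standalone sketch of the same token-pasting technique, using hypothetical names rather than the HotSpot macros:

#include <cstdio>

// Hypothetical list macro: applies f to each (closure type, suffix)
// pair, the way ALL_SINCE_SAVE_MARKS_CLOSURES applies its argument.
#define ALL_EXAMPLE_CLOSURES(f) \
  f(IntClosure, _int)           \
  f(CharClosure, _char)

struct IntClosure  { void do_it() { std::printf("int closure\n");  } };
struct CharClosure { void do_it() { std::printf("char closure\n"); } };

// Hypothetical DEFN macro: one function definition per closure type,
// with the suffix token-pasted into the function name via ##.
#define EXAMPLE_ITERATE_DEFN(ClosureType, suffix)  \
  void iterate##suffix(ClosureType* blk) {         \
    blk->do_it();                                  \
  }

// Expands to iterate_int(IntClosure*) and iterate_char(CharClosure*).
ALL_EXAMPLE_CLOSURES(EXAMPLE_ITERATE_DEFN)

int main() {
  IntClosure ic;
  CharClosure cc;
  iterate_int(&ic);
  iterate_char(&cc);
  return 0;
}

The payoff is that each generated function takes the concrete closure type, so the calls through blk need no virtual dispatch; adding a closure to the list macro regenerates the whole family of iterators at once.
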
2020 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2021   return _smallLinearAllocBlock._word_size == 0;
2022 }
2023 
2024 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2025   // Fix up linear allocation blocks to look like free blocks
2026   repairLinearAllocBlock(&_smallLinearAllocBlock);
2027 }
2028 
2029 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2030   assert_locked();
2031   if (blk->_ptr != NULL) {
2032     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2033            "Minimum block size requirement");
2034     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2035     fc->set_size(blk->_word_size);
2036     fc->link_prev(NULL);   // mark as free
2037     fc->dontCoalesce();
2038     assert(fc->is_free(), "just marked it free");
2039     assert(fc->cantCoalesce(), "just marked it uncoalescable");
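
Both versions cut off inside repairLinearAllocBlock, but the pattern it implements is visible from the code and its comments: the unused tail of a linear allocation block is reinterpreted in place as a FreeChunk, marked free through its prev link, and flagged uncoalescable so that it parses as a well-formed free block without being merged with neighboring chunks while the space still owns it. A simplified standalone analogue of that overlay idea, with hypothetical types in place of the HotSpot FreeChunk/LinearAllocBlock API:

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins for FreeChunk and LinearAllocBlock. The chunk
// header is overlaid directly on raw memory, as HotSpot overlays
// FreeChunk on the unused tail of a linear allocation block.
struct Chunk {
  std::size_t word_size;   // size of this free chunk, in words
  Chunk*      prev;        // prev link; nullptr doubles as the free mark
  bool        no_coalesce; // keep neighbors from merging with this chunk
};

struct LinearAllocBlock {
  void*       ptr;         // start of the unused tail, or nullptr
  std::size_t word_size;   // words remaining in the block
};

const std::size_t MinChunkWords = sizeof(Chunk) / sizeof(void*);

// Mirror of the repair step: dress the leftover tail up as a
// well-formed free chunk, then pin it against coalescing.
void repair(LinearAllocBlock* blk) {
  if (blk->ptr != nullptr) {
    assert(blk->word_size >= MinChunkWords && "tail too small for a header");
    Chunk* fc = static_cast<Chunk*>(blk->ptr);
    fc->word_size   = blk->word_size;
    fc->prev        = nullptr;   // marks the chunk free
    fc->no_coalesce = true;      // the block still owns this memory
  }
}

int main() {
  alignas(Chunk) char tail[64];
  LinearAllocBlock blk = { tail, sizeof(tail) / sizeof(void*) };
  repair(&blk);
  Chunk* fc = reinterpret_cast<Chunk*>(tail);
  assert(fc->word_size == blk.word_size && fc->no_coalesce);
  return 0;
}
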