
src/share/vm/gc/cms/compactibleFreeListSpace.cpp





1942   if (PrintFLSStatistics != 0) {
1943     gclog_or_tty->print("After GC:\n");
1944     reportFreeListStatistics();
1945   }
1946 }
1947 
1948 // Iteration support, mostly delegated from a CMS generation
1949 
1950 void CompactibleFreeListSpace::save_marks() {
1951   assert(Thread::current()->is_VM_thread(),
1952          "Global variable should only be set when single-threaded");
1953   // Mark the "end" of the used space at the time of this call;
1954   // note, however, that promoted objects from this point
1955   // on are tracked in the _promoInfo below.
1956   set_saved_mark_word(unallocated_block());
1957 #ifdef ASSERT
1958   // Check the sanity of save_marks() etc.
1959   MemRegion ur    = used_region();
1960   MemRegion urasm = used_region_at_save_marks();
1961   assert(ur.contains(urasm),
1962          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1963                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1964                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1965 #endif
1966   // inform allocator that promotions should be tracked.
1967   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1968   _promoInfo.startTrackingPromotions();
1969 }
1970 
1971 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1972   assert(_promoInfo.tracking(), "No preceding save_marks?");
1973   return _promoInfo.noPromotions();
1974 }
1975 
1976 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1977                                                                             \
1978 void CompactibleFreeListSpace::                                             \
1979 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1980   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1981   /*                                                                        \
1982    * This also restores any displaced headers and removes the elements from \
1983    * the iteration set as they are processed, so that we have a clean slate \
1984    * at the end of the iteration. Note, thus, that if new objects are       \


2858       OrderAccess::storestore();
2859       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2860       assert(fc->is_free(), "Error");
2861       fc->set_size(prefix_size);
2862       if (rem >= IndexSetSize) {
2863         returnChunkToDictionary(rem_fc);
2864         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2865         rem_fc = NULL;
2866       }
2867       // Otherwise, return it to the small list below.
2868     }
2869   }
2870   if (rem_fc != NULL) {
2871     MutexLockerEx x(_indexedFreeListParLocks[rem],
2872                     Mutex::_no_safepoint_check_flag);
2873     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2874     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2875     smallSplitBirth(rem);
2876   }
2877   assert(n * word_sz == fc->size(),
2878     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
2879     SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2880     fc->size(), n, word_sz));
2881   return fc;
2882 }
2883 
2884 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2885 
2886   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2887 
2888   if (fc == NULL) {
2889     return;
2890   }
2891 
2892   size_t n = fc->size() / word_sz;
2893 
2894   assert((ssize_t)n > 0, "Consistency");
2895   // Now do the splitting up.
2896   // Must do this in reverse order, so that anybody attempting to
2897   // access the main chunk sees it as a single free block until we
2898   // change it.
2899   size_t fc_size = n * word_sz;
2900   // All but first chunk in this loop




1942   if (PrintFLSStatistics != 0) {
1943     gclog_or_tty->print("After GC:\n");
1944     reportFreeListStatistics();
1945   }
1946 }
1947 
1948 // Iteration support, mostly delegated from a CMS generation
1949 
1950 void CompactibleFreeListSpace::save_marks() {
1951   assert(Thread::current()->is_VM_thread(),
1952          "Global variable should only be set when single-threaded");
1953   // Mark the "end" of the used space at the time of this call;
1954   // note, however, that promoted objects from this point
1955   // on are tracked in the _promoInfo below.
1956   set_saved_mark_word(unallocated_block());
1957 #ifdef ASSERT
1958   // Check the sanity of save_marks() etc.
1959   MemRegion ur    = used_region();
1960   MemRegion urasm = used_region_at_save_marks();
1961   assert(ur.contains(urasm),
1962          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1963          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1964          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1965 #endif
1966   // inform allocator that promotions should be tracked.
1967   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1968   _promoInfo.startTrackingPromotions();
1969 }
1970 
1971 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1972   assert(_promoInfo.tracking(), "No preceding save_marks?");
1973   return _promoInfo.noPromotions();
1974 }
1975 
1976 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1977                                                                             \
1978 void CompactibleFreeListSpace::                                             \
1979 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1980   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1981   /*                                                                        \
1982    * This also restores any displaced headers and removes the elements from \
1983    * the iteration set as they are processed, so that we have a clean slate \
1984    * at the end of the iteration. Note, thus, that if new objects are       \
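
(The visible difference between the two copies of these hunks is the assertion style: one wraps the formatted message in err_msg() before handing it to assert, the other passes the format string and its arguments to the assert directly. A rough sketch of that idiom difference, with stand-in macros rather than the real HotSpot vmassert/err_msg definitions:)

#include <cstdio>
#include <cstdlib>

// Old style: a helper formats the message up front, the assert macro takes a plain string.
#define MY_ASSERT_OLD(cond, msg) \
  do { if (!(cond)) { std::fputs(msg, stderr); std::abort(); } } while (0)

static const char* my_err_msg(const char* fmt, long a, long b) {
  static char buf[128];
  std::snprintf(buf, sizeof(buf), fmt, a, b);
  return buf;
}

// New style: the assert macro is variadic and does the formatting itself.
#define MY_ASSERT_NEW(cond, fmt, ...) \
  do { if (!(cond)) { std::fprintf(stderr, fmt, __VA_ARGS__); std::abort(); } } while (0)

int main() {
  long used = 100, saved = 80;
  MY_ASSERT_OLD(saved <= used, my_err_msg("saved %ld > used %ld\n", saved, used));
  MY_ASSERT_NEW(saved <= used, "saved %ld > used %ld\n", saved, used);
  return 0;
}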


2858       OrderAccess::storestore();
2859       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2860       assert(fc->is_free(), "Error");
2861       fc->set_size(prefix_size);
2862       if (rem >= IndexSetSize) {
2863         returnChunkToDictionary(rem_fc);
2864         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2865         rem_fc = NULL;
2866       }
2867       // Otherwise, return it to the small list below.
2868     }
2869   }
2870   if (rem_fc != NULL) {
2871     MutexLockerEx x(_indexedFreeListParLocks[rem],
2872                     Mutex::_no_safepoint_check_flag);
2873     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2874     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2875     smallSplitBirth(rem);
2876   }
2877   assert(n * word_sz == fc->size(),
2878          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2879          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2880          fc->size(), n, word_sz);
2881   return fc;
2882 }
2883 
2884 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2885 
2886   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2887 
2888   if (fc == NULL) {
2889     return;
2890   }
2891 
2892   size_t n = fc->size() / word_sz;
2893 
2894   assert((ssize_t)n > 0, "Consistency");
2895   // Now do the splitting up.
2896   // Must do this in reverse order, so that anybody attempting to
2897   // access the main chunk sees it as a single free block until we
2898   // change it.
2899   size_t fc_size = n * word_sz;
2900   // All but first chunk in this loop
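
(The comment at lines 2896-2898 explains why the carving is done back to front: until the head chunk's size is rewritten, anyone looking at the main chunk still sees one large free block. A small self-contained sketch of that reverse-order split, with a stand-in one-word Chunk type instead of the HotSpot FreeChunk and block-offset table:)

#include <cassert>
#include <cstddef>

struct Chunk { size_t size_in_words; };  // stand-in for FreeChunk; one array element = one heap word

// Install the per-chunk sizes from the last sub-chunk backwards: the chunk at
// the head keeps its original (large) size until the final iteration, so an
// observer of the head never sees a half-split region.
static void split_in_reverse(Chunk* base, size_t n, size_t word_sz) {
  for (size_t i = n; i-- > 0; ) {
    base[i * word_sz].size_in_words = word_sz;
  }
}

int main() {
  const size_t word_sz = 8, n = 4;
  Chunk region[n * word_sz];                   // room for n chunks of word_sz words
  region[0].size_in_words = n * word_sz;       // the original single large block
  split_in_reverse(region, n, word_sz);
  assert(region[0].size_in_words == word_sz);  // the head is rewritten last
  return 0;
}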

