src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
rev 5826 : 8026849: Fix typos in the GC code, part 2
Summary: Fixed typos in assert messages, flag descriptions and verbose messages
Reviewed-by:


 941     return;
 942   }
 943 
 944   double free_percentage = ((double) free()) / capacity();
 945   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 946   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 947 
 948   // compute expansion delta needed for reaching desired free percentage
 949   if (free_percentage < desired_free_percentage) {
 950     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 951     assert(desired_capacity >= capacity(), "invalid expansion size");
 952     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 953     if (PrintGCDetails && Verbose) {
 954       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 955       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 956       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 957       gclog_or_tty->print_cr("  Desired free fraction %f",
 958         desired_free_percentage);
 959       gclog_or_tty->print_cr("  Maximum free fraction %f",
 960         maximum_free_percentage);
 961       gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
 962       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 963         desired_capacity/1000);
 964       int prev_level = level() - 1;
 965       if (prev_level >= 0) {
 966         size_t prev_size = 0;
 967         GenCollectedHeap* gch = GenCollectedHeap::heap();
 968         Generation* prev_gen = gch->_gens[prev_level];
 969         prev_size = prev_gen->capacity();
 970           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 971                                  prev_size/1000);
 972       }
 973       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 974         unsafe_max_alloc_nogc()/1000);
 975       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 976         contiguous_available()/1000);
 977       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 978         expand_bytes);
 979     }
 980     // safe if expansion fails
 981     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);


3296   bool res = should_concurrent_collect();
3297   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3298   return res;
3299 }
3300 
3301 void CMSCollector::setup_cms_unloading_and_verification_state() {
3302   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3303                              || VerifyBeforeExit;
3304   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
3305 
3306   // We set the proper root for this CMS cycle here.
3307   if (should_unload_classes()) {   // Should unload classes this cycle
3308     remove_root_scanning_option(SharedHeap::SO_AllClasses);
3309     add_root_scanning_option(SharedHeap::SO_SystemClasses);
3310     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3311     set_verifying(should_verify);    // Set verification state for this cycle
3312     return;                            // Nothing else needs to be done at this time
3313   }
3314 
3315   // Not unloading classes this cycle
3316   assert(!should_unload_classes(), "Inconsitency!");
3317   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3318   add_root_scanning_option(SharedHeap::SO_AllClasses);
3319 
3320   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3321     // Include symbols, strings and code cache elements to prevent their resurrection.
3322     add_root_scanning_option(rso);
3323     set_verifying(true);
3324   } else if (verifying() && !should_verify) {
3325     // We were verifying, but some verification flags got disabled.
3326     set_verifying(false);
3327     // Exclude symbols, strings and code cache elements from root scanning to
3328     // reduce IM and RM pauses.
3329     remove_root_scanning_option(rso);
3330   }
3331 }
3332 
3333 
3334 #ifndef PRODUCT
3335 HeapWord* CMSCollector::block_start(const void* p) const {
3336   const HeapWord* addr = (HeapWord*)p;


7226     ConcurrentMarkSweepThread::acknowledge_yield_request();
7227   }
7228 
7229   ConcurrentMarkSweepThread::synchronize(true);
7230   _freelistLock->lock_without_safepoint_check();
7231   _bitMap->lock()->lock_without_safepoint_check();
7232   _collector->startTimer();
7233 }
7234 
7235 
7236 //////////////////////////////////////////////////////////////////
7237 // SurvivorSpacePrecleanClosure
7238 //////////////////////////////////////////////////////////////////
7239 // This (single-threaded) closure is used to preclean the oops in
7240 // the survivor spaces.
7241 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7242 
7243   HeapWord* addr = (HeapWord*)p;
7244   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7245   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7246   assert(p->klass_or_null() != NULL, "object should be initializd");
7247   // an initialized object; ignore mark word in verification below
7248   // since we are running concurrent with mutators
7249   assert(p->is_oop(true), "should be an oop");
7250   // Note that we do not yield while we iterate over
7251   // the interior oops of p, pushing the relevant ones
7252   // on our marking stack.
7253   size_t size = p->oop_iterate(_scanning_closure);
7254   do_yield_check();
7255   // Observe that below, we do not abandon the preclean
7256   // phase as soon as we should; rather we empty the
7257   // marking stack before returning. This is to satisfy
7258   // some existing assertions. In general, it may be a
7259   // good idea to abort immediately and complete the marking
7260   // from the grey objects at a later time.
7261   while (!_mark_stack->isEmpty()) {
7262     oop new_oop = _mark_stack->pop();
7263     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7264     assert(_bit_map->isMarked((HeapWord*)new_oop),
7265            "only grey objects on this stack");
7266     // iterate over the oops in this oop, marking and pushing




 941     return;
 942   }
 943 
 944   double free_percentage = ((double) free()) / capacity();
 945   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 946   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 947 
 948   // compute expansion delta needed for reaching desired free percentage
 949   if (free_percentage < desired_free_percentage) {
 950     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 951     assert(desired_capacity >= capacity(), "invalid expansion size");
 952     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 953     if (PrintGCDetails && Verbose) {
 954       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 955       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 956       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 957       gclog_or_tty->print_cr("  Desired free fraction %f",
 958         desired_free_percentage);
 959       gclog_or_tty->print_cr("  Maximum free fraction %f",
 960         maximum_free_percentage);
 961       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 962       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 963         desired_capacity/1000);
 964       int prev_level = level() - 1;
 965       if (prev_level >= 0) {
 966         size_t prev_size = 0;
 967         GenCollectedHeap* gch = GenCollectedHeap::heap();
 968         Generation* prev_gen = gch->_gens[prev_level];
 969         prev_size = prev_gen->capacity();
 970           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 971                                  prev_size/1000);
 972       }
 973       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 974         unsafe_max_alloc_nogc()/1000);
 975       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 976         contiguous_available()/1000);
 977       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 978         expand_bytes);
 979     }
 980     // safe if expansion fails
 981     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
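
Aside: the expansion rule above grows the generation until its free fraction reaches the MinHeapFreeRatio target, i.e. desired_capacity = used() / (1 - desired_free_percentage). The standalone sketch below works that arithmetic through with made-up numbers; the plain main(), the concrete constants and the printf are illustrative only and are not part of the HotSpot sources.

// Minimal, self-contained sketch of the free-ratio expansion rule shown above.
// All names and values here are illustrative, not HotSpot code.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t used_bytes      = 600u * 1024 * 1024;  // assumed live data
  const size_t capacity_bytes  = 700u * 1024 * 1024;  // assumed current capacity
  const double desired_free    = 0.40;                // e.g. MinHeapFreeRatio=40
  const size_t min_delta_bytes = 128u * 1024;         // e.g. MinHeapDeltaBytes

  const double free_fraction = double(capacity_bytes - used_bytes) / capacity_bytes;
  if (free_fraction < desired_free) {
    // Same relation as in the code above: pick the capacity at which
    // free/capacity would equal the desired free fraction.
    const size_t desired_capacity = (size_t)(used_bytes / (1.0 - desired_free));
    const size_t expand_bytes =
        std::max(desired_capacity - capacity_bytes, min_delta_bytes);
    std::printf("free fraction %.2f, expand by %zu bytes\n",
                free_fraction, expand_bytes);
  }
  return 0;
}

With 600 MB live in a 700 MB generation and a 40% free-ratio target, the sketch asks for a 1000 MB capacity, i.e. an expansion of roughly 300 MB, which is what the code above would request (modulo MinHeapDeltaBytes and alignment).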


3296   bool res = should_concurrent_collect();
3297   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3298   return res;
3299 }
3300 
3301 void CMSCollector::setup_cms_unloading_and_verification_state() {
3302   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3303                              || VerifyBeforeExit;
3304   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
3305 
3306   // We set the proper root for this CMS cycle here.
3307   if (should_unload_classes()) {   // Should unload classes this cycle
3308     remove_root_scanning_option(SharedHeap::SO_AllClasses);
3309     add_root_scanning_option(SharedHeap::SO_SystemClasses);
3310     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3311     set_verifying(should_verify);    // Set verification state for this cycle
3312     return;                            // Nothing else needs to be done at this time
3313   }
3314 
3315   // Not unloading classes this cycle
3316   assert(!should_unload_classes(), "Inconsistency!");
3317   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3318   add_root_scanning_option(SharedHeap::SO_AllClasses);
3319 
3320   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3321     // Include symbols, strings and code cache elements to prevent their resurrection.
3322     add_root_scanning_option(rso);
3323     set_verifying(true);
3324   } else if (verifying() && !should_verify) {
3325     // We were verifying, but some verification flags got disabled.
3326     set_verifying(false);
3327     // Exclude symbols, strings and code cache elements from root scanning to
3328     // reduce IM and RM pauses.
3329     remove_root_scanning_option(rso);
3330   }
3331 }
3332 
3333 
3334 #ifndef PRODUCT
3335 HeapWord* CMSCollector::block_start(const void* p) const {
3336   const HeapWord* addr = (HeapWord*)p;
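
Aside: add_root_scanning_option() and remove_root_scanning_option() in setup_cms_unloading_and_verification_state() above manipulate a bit mask of SharedHeap::SO_* flags. The sketch below shows the same set/clear pattern in isolation; the enum values and the Collector struct are made-up stand-ins, not SharedHeap's actual constants or the CMSCollector API.

// Sketch of the bit-flag pattern behind add/remove_root_scanning_option above.
// Enum values and the Collector struct are illustrative only.
#include <cstdio>

enum RootScanOption {
  SO_AllClasses    = 1 << 0,
  SO_SystemClasses = 1 << 1,
  SO_Strings       = 1 << 2,
  SO_AllCodeCache  = 1 << 3
};

struct Collector {
  int _roots;
  void add_root_scanning_option(int o)    { _roots |= o;  }
  void remove_root_scanning_option(int o) { _roots &= ~o; }
};

int main() {
  Collector c;
  c._roots = SO_AllClasses | SO_Strings | SO_AllCodeCache;
  const int rso = SO_Strings | SO_AllCodeCache;

  // Class-unloading cycle, mirroring the first branch of the function above:
  // scan system classes instead of all classes, and shrink the root set by rso.
  c.remove_root_scanning_option(SO_AllClasses);
  c.add_root_scanning_option(SO_SystemClasses);
  c.remove_root_scanning_option(rso);
  std::printf("root set mask: 0x%x\n", c._roots);  // prints 0x2
  return 0;
}

The non-unloading branch of the function is the mirror image: SO_SystemClasses is cleared, SO_AllClasses is set, and rso is added or removed depending on the verification flags.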


7226     ConcurrentMarkSweepThread::acknowledge_yield_request();
7227   }
7228 
7229   ConcurrentMarkSweepThread::synchronize(true);
7230   _freelistLock->lock_without_safepoint_check();
7231   _bitMap->lock()->lock_without_safepoint_check();
7232   _collector->startTimer();
7233 }
7234 
7235 
7236 //////////////////////////////////////////////////////////////////
7237 // SurvivorSpacePrecleanClosure
7238 //////////////////////////////////////////////////////////////////
7239 // This (single-threaded) closure is used to preclean the oops in
7240 // the survivor spaces.
7241 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7242 
7243   HeapWord* addr = (HeapWord*)p;
7244   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7245   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7246   assert(p->klass_or_null() != NULL, "object should be initialized");
7247   // an initialized object; ignore mark word in verification below
7248   // since we are running concurrent with mutators
7249   assert(p->is_oop(true), "should be an oop");
7250   // Note that we do not yield while we iterate over
7251   // the interior oops of p, pushing the relevant ones
7252   // on our marking stack.
7253   size_t size = p->oop_iterate(_scanning_closure);
7254   do_yield_check();
7255   // Observe that below, we do not abandon the preclean
7256   // phase as soon as we should; rather we empty the
7257   // marking stack before returning. This is to satisfy
7258   // some existing assertions. In general, it may be a
7259   // good idea to abort immediately and complete the marking
7260   // from the grey objects at a later time.
7261   while (!_mark_stack->isEmpty()) {
7262     oop new_oop = _mark_stack->pop();
7263     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7264     assert(_bit_map->isMarked((HeapWord*)new_oop),
7265            "only grey objects on this stack");
7266     // iterate over the oops in this oop, marking and pushing
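
Aside: the comment above notes that the closure drains its mark stack before returning rather than aborting the preclean as soon as a yield is requested. The sketch below shows that drain pattern on its own; GreyObject, MarkStack and scan_object() are made-up stand-ins for the HotSpot bitmap/oop machinery, not the types used by SurvivorSpacePrecleanClosure.

// Sketch of the "drain the mark stack before returning" pattern described above.
// All types and helpers here are illustrative only.
#include <stack>
#include <vector>

struct GreyObject {
  std::vector<GreyObject*> refs;  // outgoing references
  bool marked = false;
};

using MarkStack = std::stack<GreyObject*>;

// Scan one object: mark and push any unmarked referents (they become grey).
static void scan_object(GreyObject* obj, MarkStack& stack) {
  for (GreyObject* ref : obj->refs) {
    if (ref != nullptr && !ref->marked) {
      ref->marked = true;
      stack.push(ref);
    }
  }
}

// After scanning the initial object, keep popping grey objects until the
// stack is empty, as the closure above does, instead of stopping early.
static void preclean(GreyObject* p, MarkStack& stack) {
  scan_object(p, stack);
  while (!stack.empty()) {
    GreyObject* grey = stack.top();
    stack.pop();
    scan_object(grey, stack);
  }
}

int main() {
  GreyObject a, b, c;
  a.refs = { &b };
  b.refs = { &c };
  a.marked = true;
  MarkStack stack;
  preclean(&a, stack);                     // marks b and c transitively
  return (b.marked && c.marked) ? 0 : 1;
}

Because each object is marked before it is pushed, nothing is pushed twice and the loop terminates once everything reachable from p has been marked.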

