< prev index next >

src/share/vm/gc/shared/adaptiveSizePolicy.cpp

Print this page
rev 11777 : [mq]: gcinterface.patch


 108   // If the user has turned off using a dynamic number of GC threads
 109   // or the user has requested a specific number, set the active
 110   // number of workers to all the workers.
 111 
 112   uintx new_active_workers = total_workers;
 113   uintx prev_active_workers = active_workers;
 114   uintx active_workers_by_JT = 0;
 115   uintx active_workers_by_heap_size = 0;
 116 
 117   // Always use at least min_workers but use up to
 118   // GCWorkersPerJavaThread * application threads.
 119   active_workers_by_JT =
 120     MAX2((uintx) GCWorkersPerJavaThread * application_workers,
 121          min_workers);
 122 
 123   // Choose a number of GC threads based on the current size
 124   // of the heap.  This may be complicated because the size of
 125   // the heap depends on factors such as the throughput goal.
 126   // Still a large heap should be collected by more GC threads.
 127   active_workers_by_heap_size =
 128       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
 129 
 130   uintx max_active_workers =
 131     MAX2(active_workers_by_JT, active_workers_by_heap_size);
 132 
 133   new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
 134 
 135   // Increase GC workers instantly but decrease them more
 136   // slowly.
 137   if (new_active_workers < prev_active_workers) {
 138     new_active_workers =
 139       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 140   }
 141 
 142   // Check once more that the number of workers is within the limits.
 143   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
 144   assert(new_active_workers >= min_workers, "Minimum workers not observed");
 145   assert(new_active_workers <= total_workers, "Total workers not observed");
 146 
 147   if (ForceDynamicNumberOfGCThreads) {
 148     // Assume this is debugging and jiggle the number of GC threads.


 610 
 611   log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
 612   log_debug(gc, ergo)("                       GC overhead (%%)");
 613   log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
 614                       100.0 * avg_minor_gc_cost()->average(), young_gen_action);
 615   log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
 616                       100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
 617   return true;
 618 }
 619 
 // Log (at gc+ergo debug level) why this collection attempted to change the
 // tenuring threshold.  The three query methods below are mutually exclusive
 // reasons; if none is set, the assert confirms no change was attempted and
 // nothing is logged.
 620 void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
 621   // Tenuring threshold: report the (attempted) new value and the reason.
 622   if (decrement_tenuring_threshold_for_survivor_limit()) {
 623     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
 624   } else if (decrement_tenuring_threshold_for_gc_cost()) {
 625     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
 626   } else if (increment_tenuring_threshold_for_gc_cost()) {
 627     log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
 628   } else {
 629     // No reason flag set: verify that no threshold change was attempted.
 630     assert(!tenuring_threshold_change(), "(no change was attempted)");
 631   }
 632 }


 108   // If the user has turned off using a dynamic number of GC threads
 109   // or the user has requested a specific number, set the active
 110   // number of workers to all the workers.
 111 
 112   uintx new_active_workers = total_workers;
 113   uintx prev_active_workers = active_workers;
 114   uintx active_workers_by_JT = 0;
 115   uintx active_workers_by_heap_size = 0;
 116 
 117   // Always use at least min_workers but use up to
 118   // GCWorkersPerJavaThread * application threads.
 119   active_workers_by_JT =
 120     MAX2((uintx) GCWorkersPerJavaThread * application_workers,
 121          min_workers);
 122 
 123   // Choose a number of GC threads based on the current size
 124   // of the heap.  This may be complicated because the size of
 125   // the heap depends on factors such as the throughput goal.
 126   // Still a large heap should be collected by more GC threads.
 127   active_workers_by_heap_size =
 128       MAX2((size_t) 2U, GC::gc()->heap()->capacity() / HeapSizePerGCThread);
 129 
 130   uintx max_active_workers =
 131     MAX2(active_workers_by_JT, active_workers_by_heap_size);
 132 
 133   new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
 134 
 135   // Increase GC workers instantly but decrease them more
 136   // slowly.
 137   if (new_active_workers < prev_active_workers) {
 138     new_active_workers =
 139       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 140   }
 141 
 142   // Check once more that the number of workers is within the limits.
 143   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
 144   assert(new_active_workers >= min_workers, "Minimum workers not observed");
 145   assert(new_active_workers <= total_workers, "Total workers not observed");
 146 
 147   if (ForceDynamicNumberOfGCThreads) {
 148     // Assume this is debugging and jiggle the number of GC threads.


 610 
 611   log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
 612   log_debug(gc, ergo)("                       GC overhead (%%)");
 613   log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
 614                       100.0 * avg_minor_gc_cost()->average(), young_gen_action);
 615   log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
 616                       100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
 617   return true;
 618 }
 619 
 // Log (at gc+ergo debug level) why this collection attempted to change the
 // tenuring threshold.  The three query methods below are mutually exclusive
 // reasons; if none is set, the assert confirms no change was attempted and
 // nothing is logged.
 620 void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
 621   // Tenuring threshold: report the (attempted) new value and the reason.
 622   if (decrement_tenuring_threshold_for_survivor_limit()) {
 623     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
 624   } else if (decrement_tenuring_threshold_for_gc_cost()) {
 625     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
 626   } else if (increment_tenuring_threshold_for_gc_cost()) {
 627     log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
 628   } else {
 629     // No reason flag set: verify that no threshold change was attempted.
 630     assert(!tenuring_threshold_change(), "(no change was attempted)");
 631   }
 632 }
 632 
 // Adaptive-size-policy output is produced only when all three hold:
 // the parallel collector is selected, UseAdaptiveSizePolicy is on, and
 // gc+ergo logging is enabled at Debug level.
 633 bool AdaptiveSizePolicyOutput::enabled() {
 634   return UseParallelGC &&
 635     UseAdaptiveSizePolicy &&
 636     log_is_enabled(Debug, gc, ergo);
 637 }
 638 
 // Print the heap's size policy, obtained through the GC interface
 // (GC::gc()->heap()), whenever output is enabled (see enabled() above).
 639 void AdaptiveSizePolicyOutput::print() {
 640   if (enabled()) {
 641     GC::gc()->heap()->size_policy()->print();
 642   }
 643 }
 644 
 // Periodic variant: print the given size policy only on every
 // AdaptiveSizePolicyOutputInterval-th invocation (count is the caller's
 // collection counter).  The interval > 0 guard both disables output when
 // the interval is 0 and protects the modulo from division by zero.
 645 void AdaptiveSizePolicyOutput::print(AdaptiveSizePolicy* size_policy, uint count) {
 646   bool do_print =
 647     enabled() &&
 648     (AdaptiveSizePolicyOutputInterval > 0) &&
 649     (count % AdaptiveSizePolicyOutputInterval) == 0;
 650 
 651   if (do_print) {
 652     size_policy->print();
 653   }
 654 }
< prev index next >