src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7215 : imported patch remove_levels


 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size, int level,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, level, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)
 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {


 802     _space_counters->update_used(used);
 803     _space_counters->update_capacity();
 804     _gen_counters->update_all();
 805   }
 806 }
 807 
 808 void ConcurrentMarkSweepGeneration::print() const {
 809   Generation::print();
 810   cmsSpace()->print();
 811 }
 812 
 813 #ifndef PRODUCT
 814 void ConcurrentMarkSweepGeneration::print_statistics() {
 815   cmsSpace()->printFLCensus(0);
 816 }
 817 #endif
 818 
 819 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 820   GenCollectedHeap* gch = GenCollectedHeap::heap();
 821   if (PrintGCDetails) {





 822     if (Verbose) {
 823       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 824         level(), short_name(), s, used(), capacity());
 825     } else {
 826       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 827         level(), short_name(), s, used() / K, capacity() / K);
 828     }
 829   }
 830   if (Verbose) {
 831     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 832               gch->used(), gch->capacity());
 833   } else {
 834     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 835               gch->used() / K, gch->capacity() / K);
 836   }
 837 }
 838 
 839 size_t
 840 ConcurrentMarkSweepGeneration::contiguous_available() const {
 841   // dld proposes an improvement in precision here. If the committed
 842   // part of the space ends in a free block we should add that to
 843   // uncommitted size in the calculation below. Will make this
 844   // change later, staying with the approximation below for the
 845   // time being. -- ysr.
 846   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 847 }


 931     return;
 932   }
 933 
 934   double free_percentage = ((double) free()) / capacity();
 935   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 936   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 937 
 938   // compute expansion delta needed for reaching desired free percentage
 939   if (free_percentage < desired_free_percentage) {
 940     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 941     assert(desired_capacity >= capacity(), "invalid expansion size");
 942     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 943     if (PrintGCDetails && Verbose) {
 944       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 945       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 946       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 947       gclog_or_tty->print_cr("  Desired free fraction %f",
 948         desired_free_percentage);
 949       gclog_or_tty->print_cr("  Maximum free fraction %f",
 950         maximum_free_percentage);
 951       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 952       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 953         desired_capacity/1000);
 954       int prev_level = level() - 1;
 955       if (prev_level >= 0) {
 956         size_t prev_size = 0;
 957         GenCollectedHeap* gch = GenCollectedHeap::heap();
 958         Generation* prev_gen = gch->young_gen();
 959         prev_size = prev_gen->capacity();
 960         gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 961                                prev_size/1000);
 962       }
 963       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 964         unsafe_max_alloc_nogc()/1000);
 965       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 966         contiguous_available()/1000);
 967       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 968         expand_bytes);
 969     }
 970     // safe if expansion fails
 971     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 972     if (PrintGCDetails && Verbose) {
 973       gclog_or_tty->print_cr("  Expanded free fraction %f",
 974         ((double) free()) / capacity());
 975     }
 976   } else {
 977     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 978     assert(desired_capacity <= capacity(), "invalid expansion size");
 979     size_t shrink_bytes = capacity() - desired_capacity;
 980     // Don't shrink unless the delta is at least the minimum shrink we want
 981     if (shrink_bytes >= MinHeapDeltaBytes) {
 982       shrink_free_list_by(shrink_bytes);
 983     }
 984   }
 985 }
 986 


2037   // collection, clear the _modUnionTable.
2038   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2039     "_modUnionTable should be clear if the baton was not passed");
2040   _modUnionTable.clear_all();
2041   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2042     "mod union for klasses should be clear if the baton was not passed");
2043   _ct->klass_rem_set()->clear_mod_union();
2044 
2045   // We must adjust the allocation statistics being maintained
2046   // in the free list space. We do so by reading and clearing
2047   // the sweep timer and updating the block flux rate estimates below.
2048   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2049   if (_inter_sweep_timer.is_active()) {
2050     _inter_sweep_timer.stop();
2051     // Note that we do not use this sample to update the _inter_sweep_estimate.
2052     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2053                                             _inter_sweep_estimate.padded_average(),
2054                                             _intra_sweep_estimate.padded_average());
2055   }
2056 
2057   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2058     ref_processor(), clear_all_soft_refs);
2059   #ifdef ASSERT
2060     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2061     size_t free_size = cms_space->free();
2062     assert(free_size ==
2063            pointer_delta(cms_space->end(), cms_space->compaction_top())
2064            * HeapWordSize,
2065       "All the free space should be compacted into one chunk at top");
2066     assert(cms_space->dictionary()->total_chunk_size(
2067                                       debug_only(cms_space->freelistLock())) == 0 ||
2068            cms_space->totalSizeInIndexedFreeLists() == 0,
2069       "All the free space should be in a single chunk");
2070     size_t num = cms_space->totalCount();
2071     assert((free_size == 0 && num == 0) ||
2072            (free_size > 0  && (num == 1 || num == 2)),
2073          "There should be at most 2 free chunks after compaction");
2074   #endif // ASSERT
2075   _collectorState = Resetting;
2076   assert(_restart_addr == NULL,
2077          "Should have been NULL'd before baton was passed");
2078   reset(false /* == !asynch */);


2986   } else {
2987     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2988             CMSRemarkVerifyVariant);
2989   }
2990   if (!silent) gclog_or_tty->print(" done] ");
2991   return true;
2992 }
2993 
2994 void CMSCollector::verify_after_remark_work_1() {
2995   ResourceMark rm;
2996   HandleMark  hm;
2997   GenCollectedHeap* gch = GenCollectedHeap::heap();
2998 
2999   // Get a clear set of claim bits for the roots processing to work with.
3000   ClassLoaderDataGraph::clear_claimed_marks();
3001 
3002   // Mark from roots one level into CMS
3003   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3004   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3005 
3006   gch->gen_process_roots(_cmsGen->level(),
3007                          true,   // younger gens are roots
3008                          true,   // activate StrongRootsScope
3009                          SharedHeap::ScanningOption(roots_scanning_options()),
3010                          should_unload_classes(),
3011                          &notOlder,
3012                          NULL,
3013                          NULL);  // SSS: Provide correct closure
3014 
3015   // Now mark from the roots
3016   MarkFromRootsClosure markFromRootsClosure(this, _span,
3017     verification_mark_bm(), verification_mark_stack(),
3018     false /* don't yield */, true /* verifying */);
3019   assert(_restart_addr == NULL, "Expected pre-condition");
3020   verification_mark_bm()->iterate(&markFromRootsClosure);
3021   while (_restart_addr != NULL) {
3022     // Deal with stack overflow by restarting at the indicated
3023     // address.
3024     HeapWord* ra = _restart_addr;
3025     markFromRootsClosure.reset(ra);
3026     _restart_addr = NULL;


3054   void do_klass(Klass* k) {
3055     k->oops_do(&_oop_closure);
3056   }
3057 };
3058 
3059 void CMSCollector::verify_after_remark_work_2() {
3060   ResourceMark rm;
3061   HandleMark  hm;
3062   GenCollectedHeap* gch = GenCollectedHeap::heap();
3063 
3064   // Get a clear set of claim bits for the roots processing to work with.
3065   ClassLoaderDataGraph::clear_claimed_marks();
3066 
3067   // Mark from roots one level into CMS
3068   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3069                                      markBitMap());
3070   CLDToOopClosure cld_closure(&notOlder, true);
3071 
3072   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3073 
3074   gch->gen_process_roots(_cmsGen->level(),
3075                          true,   // younger gens are roots
3076                          true,   // activate StrongRootsScope
3077                          SharedHeap::ScanningOption(roots_scanning_options()),
3078                          should_unload_classes(),
3079                          &notOlder,
3080                          NULL,
3081                          &cld_closure);
3082 
3083   // Now mark from the roots
3084   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3085     verification_mark_bm(), markBitMap(), verification_mark_stack());
3086   assert(_restart_addr == NULL, "Expected pre-condition");
3087   verification_mark_bm()->iterate(&markFromRootsClosure);
3088   while (_restart_addr != NULL) {
3089     // Deal with stack overflow by restarting at the indicated
3090     // address.
3091     HeapWord* ra = _restart_addr;
3092     markFromRootsClosure.reset(ra);
3093     _restart_addr = NULL;
3094     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());


3668     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3669       // The parallel version.
3670       FlexibleWorkGang* workers = gch->workers();
3671       assert(workers != NULL, "Need parallel worker threads.");
3672       int n_workers = workers->active_workers();
3673       CMSParInitialMarkTask tsk(this, n_workers);
3674       gch->set_par_threads(n_workers);
3675       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3676       if (n_workers > 1) {
3677         GenCollectedHeap::StrongRootsScope srs(gch);
3678         workers->run_task(&tsk);
3679       } else {
3680         GenCollectedHeap::StrongRootsScope srs(gch);
3681         tsk.work(0);
3682       }
3683       gch->set_par_threads(0);
3684     } else {
3685       // The serial version.
3686       CLDToOopClosure cld_closure(&notOlder, true);
3687       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3688       gch->gen_process_roots(_cmsGen->level(),
3689                              true,   // younger gens are roots
3690                              true,   // activate StrongRootsScope
3691                              SharedHeap::ScanningOption(roots_scanning_options()),
3692                              should_unload_classes(),
3693                              &notOlder,
3694                              NULL,
3695                              &cld_closure);
3696     }
3697   }
3698 
3699   // Clear mod-union table; it will be dirtied in the CMS generation's
3700   // prologue on each younger generation collection.
3701 
3702   assert(_modUnionTable.isAllClear(),
3703        "Was cleared in the most recent final checkpoint phase"
3704        " or no bits were set in the gc_prologue before the start"
3705        " of the subsequent marking phase.");
3706 
3707   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3708 


4944          "world should be stopped");
4945   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4946 
4947   verify_work_stacks_empty();
4948   verify_overflow_empty();
4949 
4950   SpecializationStats::clear();
4951   if (PrintGCDetails) {
4952     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4953                         _young_gen->used() / K,
4954                         _young_gen->capacity() / K);
4955   }
4956   if (asynch) {
4957     if (CMSScavengeBeforeRemark) {
4958       GenCollectedHeap* gch = GenCollectedHeap::heap();
4959       // Temporarily set the flag to false; GCH->do_collection expects
4960       // it to be false and will set it to true.
4961       FlagSetting fl(gch->_is_gc_active, false);
4962       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4963         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4964       int level = _cmsGen->level() - 1;
4965       if (level >= 0) {
4966         gch->do_collection(true,        // full (i.e. force, see below)
4967                            false,       // !clear_all_soft_refs
4968                            0,           // size
4969                            false,       // is_tlab
4970                            level        // max_level
4971                           );
4972       }
4973     }
4974     FreelistLocker x(this);
4975     MutexLockerEx y(bitMapLock(),
4976                     Mutex::_no_safepoint_check_flag);
4977     assert(!init_mark_was_synchronous, "but that's impossible!");
4978     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4979   } else {
4980     // already have all the locks
4981     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4982                              init_mark_was_synchronous);
4983   }
4984   verify_work_stacks_empty();
4985   verify_overflow_empty();
4986   SpecializationStats::print();
4987 }
4988 
4989 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4990   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4991 
4992   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4993 


5139   GenCollectedHeap* gch = GenCollectedHeap::heap();
5140   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5141 
5142   // ---------- young gen roots --------------
5143   {
5144     work_on_young_gen_roots(worker_id, &par_mri_cl);
5145     _timer.stop();
5146     if (PrintCMSStatistics != 0) {
5147       gclog_or_tty->print_cr(
5148         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5149         worker_id, _timer.seconds());
5150     }
5151   }
5152 
5153   // ---------- remaining roots --------------
5154   _timer.reset();
5155   _timer.start();
5156 
5157   CLDToOopClosure cld_closure(&par_mri_cl, true);
5158 
5159   gch->gen_process_roots(_collector->_cmsGen->level(),
5160                          false,     // yg was scanned above
5161                          false,     // this is parallel code
5162                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5163                          _collector->should_unload_classes(),
5164                          &par_mri_cl,
5165                          NULL,
5166                          &cld_closure);
5167   assert(_collector->should_unload_classes()
5168          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5169          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5170   _timer.stop();
5171   if (PrintCMSStatistics != 0) {
5172     gclog_or_tty->print_cr(
5173       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5174       worker_id, _timer.seconds());
5175   }
5176 }
5177 
5178 // Parallel remark task
5179 class CMSParRemarkTask: public CMSParMarkTask {


5275     work_queue(worker_id));
5276 
5277   // Rescan young gen roots first since these are likely
5278   // coarsely partitioned and may, on that account, constitute
5279   // the critical path; thus, it's best to start off that
5280   // work first.
5281   // ---------- young gen roots --------------
5282   {
5283     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5284     _timer.stop();
5285     if (PrintCMSStatistics != 0) {
5286       gclog_or_tty->print_cr(
5287         "Finished young gen rescan work in %dth thread: %3.3f sec",
5288         worker_id, _timer.seconds());
5289     }
5290   }
5291 
5292   // ---------- remaining roots --------------
5293   _timer.reset();
5294   _timer.start();
5295   gch->gen_process_roots(_collector->_cmsGen->level(),
5296                          false,     // yg was scanned above
5297                          false,     // this is parallel code
5298                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5299                          _collector->should_unload_classes(),
5300                          &par_mrias_cl,
5301                          NULL,
5302                          NULL);     // The dirty klasses will be handled below
5303 
5304   assert(_collector->should_unload_classes()
5305          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5306          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5307   _timer.stop();
5308   if (PrintCMSStatistics != 0) {
5309     gclog_or_tty->print_cr(
5310       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5311       worker_id, _timer.seconds());
5312   }
5313 
5314   // ---------- unhandled CLD scanning ----------
5315   if (worker_id == 0) { // Single threaded at the moment.


5867       verify_work_stacks_empty();
5868       if (PrintCMSStatistics != 0) {
5869         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5870           markFromDirtyCardsClosure.num_dirty_cards());
5871       }
5872     }
5873   }
5874   if (VerifyDuringGC &&
5875       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5876     HandleMark hm;  // Discard invalid handles created during verification
5877     Universe::verify();
5878   }
5879   {
5880     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5881 
5882     verify_work_stacks_empty();
5883 
5884     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5885     GenCollectedHeap::StrongRootsScope srs(gch);
5886 
5887     gch->gen_process_roots(_cmsGen->level(),
5888                            true,  // younger gens as roots
5889                            false, // use the local StrongRootsScope
5890                            SharedHeap::ScanningOption(roots_scanning_options()),
5891                            should_unload_classes(),
5892                            &mrias_cl,
5893                            NULL,
5894                            NULL); // The dirty klasses will be handled below
5895 
5896     assert(should_unload_classes()
5897            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5898            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5899   }
5900 
5901   {
5902     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5903 
5904     verify_work_stacks_empty();
5905 
5906     // Scan all class loader data objects that might have been introduced
5907     // during concurrent marking.


6344   size_t nearLargestOffset =
6345     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6346   if (PrintFLSStatistics != 0) {
6347     gclog_or_tty->print_cr(
6348       "CMS: Large Block: " PTR_FORMAT ";"
6349       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6350       largestAddr,
6351       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6352   }
6353   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6354 }
6355 
6356 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6357   return addr >= _cmsSpace->nearLargestChunk();
6358 }
6359 
6360 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6361   return _cmsSpace->find_chunk_at_end();
6362 }
6363 
6364 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6365                                                     bool full) {
6366   // The next lower level has been collected.  Gather any statistics
6367   // that are of interest at this point.
6368   if (!full && (current_level + 1) == level()) {

6369     // Gather statistics on the young generation collection.
6370     collector()->stats().record_gc0_end(used());
6371   }
6372 }
6373 
6374 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6375   if (PrintGCDetails && Verbose) {
6376     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6377   }
6378   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6379   _debug_collection_type =
6380     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6381   if (PrintGCDetails && Verbose) {
6382     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6383   }
6384 }
6385 
6386 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6387   bool asynch) {
6388   // We iterate over the space(s) underlying this generation,




 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
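The per-thread state above exists so that parallel young-gen collection can promote into the CMS space without locking: each worker gets its own CFLS_LAB and PromotionInfo. As a hedged sketch of how such state is typically wired up (the array handling here is illustrative, not necessarily this file's exact allocation code):

    // Sketch: one CMSParGCThreadState per parallel worker, indexed by
    // worker id, so each GC thread has a private allocation buffer and
    // private promotion records. ParallelGCThreads and cmsSpace() are
    // from the surrounding code; the rest is illustrative.
    CMSParGCThreadState** par_states =
        new CMSParGCThreadState*[ParallelGCThreads];
    for (uint i = 0; i < ParallelGCThreads; i++) {
      par_states[i] = new CMSParGCThreadState(cmsSpace());
    }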
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)
 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {


 802     _space_counters->update_used(used);
 803     _space_counters->update_capacity();
 804     _gen_counters->update_all();
 805   }
 806 }
 807 
 808 void ConcurrentMarkSweepGeneration::print() const {
 809   Generation::print();
 810   cmsSpace()->print();
 811 }
 812 
 813 #ifndef PRODUCT
 814 void ConcurrentMarkSweepGeneration::print_statistics() {
 815   cmsSpace()->printFLCensus(0);
 816 }
 817 #endif
 818 
 819 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 820   GenCollectedHeap* gch = GenCollectedHeap::heap();
 821   if (PrintGCDetails) {
 822     // The logging is intentionally unchanged by the removal of the level
 823     // concept; it could eventually print "old" instead of the literal "1".
 824     assert(this == gch->old_gen(),
 825            "The CMS generation should be the old generation");
 826     int level = 1;
 827     if (Verbose) {
 828       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 829         level, short_name(), s, used(), capacity());
 830     } else {
 831       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 832         level, short_name(), s, used() / K, capacity() / K);
 833     }
 834   }
 835   if (Verbose) {
 836     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 837               gch->used(), gch->capacity());
 838   } else {
 839     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 840               gch->used() / K, gch->capacity() / K);
 841   }
 842 }
 843 
 844 size_t
 845 ConcurrentMarkSweepGeneration::contiguous_available() const {
 846   // dld proposes an improvement in precision here. If the committed
 847   // part of the space ends in a free block we should add that to
 848   // uncommitted size in the calculation below. Will make this
 849   // change later, staying with the approximation below for the
 850   // time being. -- ysr.
 851   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 852 }
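The comment above records a proposed precision improvement that was never made. A hedged sketch of what the refinement could look like, assuming a hypothetical helper trailing_free_block_size() that reports the size of a free block ending at the committed boundary:

    // Sketch only: if the committed part of the space ends in a free
    // block, that block is contiguous with the uncommitted tail and
    // could be counted with it. trailing_free_block_size() is a
    // hypothetical helper; the actual code keeps the approximation.
    size_t contiguous_available_refined() const {
      size_t tail = _virtual_space.uncommitted_size()
                  + trailing_free_block_size();
      return MAX2(tail, unsafe_max_alloc_nogc());
    }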


 936     return;
 937   }
 938 
 939   double free_percentage = ((double) free()) / capacity();
 940   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 941   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 942 
 943   // compute expansion delta needed for reaching desired free percentage
 944   if (free_percentage < desired_free_percentage) {
 945     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 946     assert(desired_capacity >= capacity(), "invalid expansion size");
 947     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 948     if (PrintGCDetails && Verbose) {
 949       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 950       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 951       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 952       gclog_or_tty->print_cr("  Desired free fraction %f",
 953                              desired_free_percentage);
 954       gclog_or_tty->print_cr("  Maximum free fraction %f",
 955                              maximum_free_percentage);
 956       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity() / 1000);
 957       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 958                              desired_capacity / 1000);



 959       GenCollectedHeap* gch = GenCollectedHeap::heap();
 960       assert(this == gch->_old_gen,
 961              "The CMS generation should always be the old generation");
 962       size_t young_size = gch->_young_gen->capacity();
 963       gclog_or_tty->print_cr("  Young gen size "SIZE_FORMAT,
 964                              young_size / 1000);
 965       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 966                              unsafe_max_alloc_nogc() / 1000);
 967       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 968                              contiguous_available() / 1000);
 969       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 970                              expand_bytes);
 971     }
 972     // safe if expansion fails
 973     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 974     if (PrintGCDetails && Verbose) {
 975       gclog_or_tty->print_cr("  Expanded free fraction %f",
 976         ((double) free()) / capacity());
 977     }
 978   } else {
 979     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 980     assert(desired_capacity <= capacity(), "invalid expansion size");
 981     size_t shrink_bytes = capacity() - desired_capacity;
 982     // Don't shrink unless the delta is at least the minimum shrink we want
 983     if (shrink_bytes >= MinHeapDeltaBytes) {
 984       shrink_free_list_by(shrink_bytes);
 985     }
 986   }
 987 }
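To make the free-ratio arithmetic above concrete, a worked example with invented numbers:

    // Illustrative values only, not from a real run.
    // used()           = 600 * M bytes
    // MinHeapFreeRatio = 40, so desired_free_percentage = 0.40
    size_t used             = 600 * M;
    double desired_free     = 40 / 100.0;
    size_t desired_capacity = (size_t)(used / (1.0 - desired_free)); // 1000 * M
    // With capacity() == 800 * M, the generation expands by
    // MAX2(desired_capacity - capacity(), MinHeapDeltaBytes) == 200 * M.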
 988 


2039   // collection, clear the _modUnionTable.
2040   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2041     "_modUnionTable should be clear if the baton was not passed");
2042   _modUnionTable.clear_all();
2043   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2044     "mod union for klasses should be clear if the baton was not passed");
2045   _ct->klass_rem_set()->clear_mod_union();
2046 
2047   // We must adjust the allocation statistics being maintained
2048   // in the free list space. We do so by reading and clearing
2049   // the sweep timer and updating the block flux rate estimates below.
2050   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2051   if (_inter_sweep_timer.is_active()) {
2052     _inter_sweep_timer.stop();
2053     // Note that we do not use this sample to update the _inter_sweep_estimate.
2054     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2055                                             _inter_sweep_estimate.padded_average(),
2056                                             _intra_sweep_estimate.padded_average());
2057   }
2058 
2059   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

2060   #ifdef ASSERT
2061     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2062     size_t free_size = cms_space->free();
2063     assert(free_size ==
2064            pointer_delta(cms_space->end(), cms_space->compaction_top())
2065            * HeapWordSize,
2066       "All the free space should be compacted into one chunk at top");
2067     assert(cms_space->dictionary()->total_chunk_size(
2068                                       debug_only(cms_space->freelistLock())) == 0 ||
2069            cms_space->totalSizeInIndexedFreeLists() == 0,
2070       "All the free space should be in a single chunk");
2071     size_t num = cms_space->totalCount();
2072     assert((free_size == 0 && num == 0) ||
2073            (free_size > 0  && (num == 1 || num == 2)),
2074          "There should be at most 2 free chunks after compaction");
2075   #endif // ASSERT
2076   _collectorState = Resetting;
2077   assert(_restart_addr == NULL,
2078          "Should have been NULL'd before baton was passed");
2079   reset(false /* == !asynch */);


2987   } else {
2988     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2989             CMSRemarkVerifyVariant);
2990   }
2991   if (!silent) gclog_or_tty->print(" done] ");
2992   return true;
2993 }
2994 
2995 void CMSCollector::verify_after_remark_work_1() {
2996   ResourceMark rm;
2997   HandleMark  hm;
2998   GenCollectedHeap* gch = GenCollectedHeap::heap();
2999 
3000   // Get a clear set of claim bits for the roots processing to work with.
3001   ClassLoaderDataGraph::clear_claimed_marks();
3002 
3003   // Mark from roots one level into CMS
3004   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3005   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3006 
3007   gch->gen_process_roots(Generation::Old,
3008                          true,   // younger gens are roots
3009                          true,   // activate StrongRootsScope
3010                          SharedHeap::ScanningOption(roots_scanning_options()),
3011                          should_unload_classes(),
3012                          &notOlder,
3013                          NULL,
3014                          NULL);  // SSS: Provide correct closure
3015 
3016   // Now mark from the roots
3017   MarkFromRootsClosure markFromRootsClosure(this, _span,
3018     verification_mark_bm(), verification_mark_stack(),
3019     false /* don't yield */, true /* verifying */);
3020   assert(_restart_addr == NULL, "Expected pre-condition");
3021   verification_mark_bm()->iterate(&markFromRootsClosure);
3022   while (_restart_addr != NULL) {
3023     // Deal with stack overflow by restarting at the indicated
3024     // address.
3025     HeapWord* ra = _restart_addr;
3026     markFromRootsClosure.reset(ra);
3027     _restart_addr = NULL;


3055   void do_klass(Klass* k) {
3056     k->oops_do(&_oop_closure);
3057   }
3058 };
3059 
3060 void CMSCollector::verify_after_remark_work_2() {
3061   ResourceMark rm;
3062   HandleMark  hm;
3063   GenCollectedHeap* gch = GenCollectedHeap::heap();
3064 
3065   // Get a clear set of claim bits for the roots processing to work with.
3066   ClassLoaderDataGraph::clear_claimed_marks();
3067 
3068   // Mark from roots one level into CMS
3069   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3070                                      markBitMap());
3071   CLDToOopClosure cld_closure(&notOlder, true);
3072 
3073   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3074 
3075   gch->gen_process_roots(Generation::Old,
3076                          true,   // younger gens are roots
3077                          true,   // activate StrongRootsScope
3078                          SharedHeap::ScanningOption(roots_scanning_options()),
3079                          should_unload_classes(),
3080                          &notOlder,
3081                          NULL,
3082                          &cld_closure);
3083 
3084   // Now mark from the roots
3085   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3086     verification_mark_bm(), markBitMap(), verification_mark_stack());
3087   assert(_restart_addr == NULL, "Expected pre-condition");
3088   verification_mark_bm()->iterate(&markFromRootsClosure);
3089   while (_restart_addr != NULL) {
3090     // Deal with stack overflow by restarting at the indicated
3091     // address.
3092     HeapWord* ra = _restart_addr;
3093     markFromRootsClosure.reset(ra);
3094     _restart_addr = NULL;
3095     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());

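Both verification routines above use the same overflow-restart protocol: the marking closure records where it ran out of stack, and the driver resumes the bitmap iteration from that address. A distilled sketch of the pattern, with comments spelling out the contract:

    // Overflow-restart pattern distilled from the loops above. The
    // closure sets _restart_addr when its mark stack overflows; the
    // driver clears it before resuming so another overflow can be
    // detected, and re-iterates from the recorded address to the end
    // of the span.
    verification_mark_bm()->iterate(&markFromRootsClosure);
    while (_restart_addr != NULL) {
      HeapWord* ra = _restart_addr;      // where the last pass gave up
      markFromRootsClosure.reset(ra);
      _restart_addr = NULL;              // may be set again on overflow
      verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
    }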

3669     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3670       // The parallel version.
3671       FlexibleWorkGang* workers = gch->workers();
3672       assert(workers != NULL, "Need parallel worker threads.");
3673       int n_workers = workers->active_workers();
3674       CMSParInitialMarkTask tsk(this, n_workers);
3675       gch->set_par_threads(n_workers);
3676       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3677       if (n_workers > 1) {
3678         GenCollectedHeap::StrongRootsScope srs(gch);
3679         workers->run_task(&tsk);
3680       } else {
3681         GenCollectedHeap::StrongRootsScope srs(gch);
3682         tsk.work(0);
3683       }
3684       gch->set_par_threads(0);
3685     } else {
3686       // The serial version.
3687       CLDToOopClosure cld_closure(&notOlder, true);
3688       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3689       gch->gen_process_roots(Generation::Old,
3690                              true,   // younger gens are roots
3691                              true,   // activate StrongRootsScope
3692                              SharedHeap::ScanningOption(roots_scanning_options()),
3693                              should_unload_classes(),
3694                              &notOlder,
3695                              NULL,
3696                              &cld_closure);
3697     }
3698   }
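The if/else above follows the usual HotSpot pattern for dispatching root work either to a work gang or inline on the current thread. A hedged distillation of that pattern:

    // Dispatch pattern distilled from the code above: announce the
    // number of parallel threads to the heap, run the task on the gang
    // (each gang thread calls tsk.work(worker_id)) or inline when only
    // one worker is active, then restore the serial default.
    // (Both branches in the real code also hold a StrongRootsScope.)
    gch->set_par_threads(n_workers);
    if (n_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
    gch->set_par_threads(0);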
3699 
3700   // Clear mod-union table; it will be dirtied in the CMS generation's
3701   // prologue on each younger generation collection.
3702 
3703   assert(_modUnionTable.isAllClear(),
3704        "Was cleared in the most recent final checkpoint phase"
3705        " or no bits were set in the gc_prologue before the start"
3706        " of the subsequent marking phase.");
3707 
3708   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3709 


4945          "world should be stopped");
4946   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4947 
4948   verify_work_stacks_empty();
4949   verify_overflow_empty();
4950 
4951   SpecializationStats::clear();
4952   if (PrintGCDetails) {
4953     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4954                         _young_gen->used() / K,
4955                         _young_gen->capacity() / K);
4956   }
4957   if (asynch) {
4958     if (CMSScavengeBeforeRemark) {
4959       GenCollectedHeap* gch = GenCollectedHeap::heap();
4960       // Temporarily set the flag to false; GCH->do_collection expects
4961       // it to be false and will set it to true.
4962       FlagSetting fl(gch->_is_gc_active, false);
4963       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4964         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)


4965       gch->do_collection(true,             // full (i.e. force, see below)
4966                          false,            // !clear_all_soft_refs
4967                          0,                // size
4968                          false,            // is_tlab
4969                          Generation::Young // type
4970         );
4971     }

4972     FreelistLocker x(this);
4973     MutexLockerEx y(bitMapLock(),
4974                     Mutex::_no_safepoint_check_flag);
4975     assert(!init_mark_was_synchronous, "but that's impossible!");
4976     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4977   } else {
4978     // already have all the locks
4979     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4980                              init_mark_was_synchronous);
4981   }
4982   verify_work_stacks_empty();
4983   verify_overflow_empty();
4984   SpecializationStats::print();
4985 }
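The FlagSetting object used in the scavenge-before-remark path above is an RAII guard: it overwrites a flag for the enclosing scope and restores the saved value when the scope exits, even on early return. A self-contained sketch of the idiom (the general shape, not necessarily HotSpot's exact declaration):

    // RAII flag guard: force a bool to a chosen value for the lifetime
    // of a scope and restore the previous value in the destructor.
    class FlagSettingSketch {
      bool* _flag;
      bool  _saved;
     public:
      FlagSettingSketch(bool& flag, bool value)
        : _flag(&flag), _saved(flag) {
        flag = value;
      }
      ~FlagSettingSketch() { *_flag = _saved; }
    };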
4986 
4987 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4988   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4989 
4990   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4991 


5137   GenCollectedHeap* gch = GenCollectedHeap::heap();
5138   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5139 
5140   // ---------- young gen roots --------------
5141   {
5142     work_on_young_gen_roots(worker_id, &par_mri_cl);
5143     _timer.stop();
5144     if (PrintCMSStatistics != 0) {
5145       gclog_or_tty->print_cr(
5146         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5147         worker_id, _timer.seconds());
5148     }
5149   }
5150 
5151   // ---------- remaining roots --------------
5152   _timer.reset();
5153   _timer.start();
5154 
5155   CLDToOopClosure cld_closure(&par_mri_cl, true);
5156 
5157   gch->gen_process_roots(Generation::Old,
5158                          false,     // yg was scanned above
5159                          false,     // this is parallel code
5160                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5161                          _collector->should_unload_classes(),
5162                          &par_mri_cl,
5163                          NULL,
5164                          &cld_closure);
5165   assert(_collector->should_unload_classes()
5166          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5167          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5168   _timer.stop();
5169   if (PrintCMSStatistics != 0) {
5170     gclog_or_tty->print_cr(
5171       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5172       worker_id, _timer.seconds());
5173   }
5174 }
5175 
5176 // Parallel remark task
5177 class CMSParRemarkTask: public CMSParMarkTask {


5273     work_queue(worker_id));
5274 
5275   // Rescan young gen roots first since these are likely
5276   // coarsely partitioned and may, on that account, constitute
5277   // the critical path; thus, it's best to start off that
5278   // work first.
5279   // ---------- young gen roots --------------
5280   {
5281     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5282     _timer.stop();
5283     if (PrintCMSStatistics != 0) {
5284       gclog_or_tty->print_cr(
5285         "Finished young gen rescan work in %dth thread: %3.3f sec",
5286         worker_id, _timer.seconds());
5287     }
5288   }
5289 
5290   // ---------- remaining roots --------------
5291   _timer.reset();
5292   _timer.start();
5293   gch->gen_process_roots(Generation::Old,
5294                          false,     // yg was scanned above
5295                          false,     // this is parallel code
5296                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5297                          _collector->should_unload_classes(),
5298                          &par_mrias_cl,
5299                          NULL,
5300                          NULL);     // The dirty klasses will be handled below
5301 
5302   assert(_collector->should_unload_classes()
5303          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5304          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5305   _timer.stop();
5306   if (PrintCMSStatistics != 0) {
5307     gclog_or_tty->print_cr(
5308       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5309       worker_id, _timer.seconds());
5310   }
5311 
5312   // ---------- unhandled CLD scanning ----------
5313   if (worker_id == 0) { // Single threaded at the moment.


5865       verify_work_stacks_empty();
5866       if (PrintCMSStatistics != 0) {
5867         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5868           markFromDirtyCardsClosure.num_dirty_cards());
5869       }
5870     }
5871   }
5872   if (VerifyDuringGC &&
5873       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5874     HandleMark hm;  // Discard invalid handles created during verification
5875     Universe::verify();
5876   }
5877   {
5878     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5879 
5880     verify_work_stacks_empty();
5881 
5882     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5883     GenCollectedHeap::StrongRootsScope srs(gch);
5884 
5885     gch->gen_process_roots(Generation::Old,
5886                            true,  // younger gens as roots
5887                            false, // use the local StrongRootsScope
5888                            SharedHeap::ScanningOption(roots_scanning_options()),
5889                            should_unload_classes(),
5890                            &mrias_cl,
5891                            NULL,
5892                            NULL); // The dirty klasses will be handled below
5893 
5894     assert(should_unload_classes()
5895            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5896            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5897   }
5898 
5899   {
5900     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5901 
5902     verify_work_stacks_empty();
5903 
5904     // Scan all class loader data objects that might have been introduced
5905     // during concurrent marking.


6342   size_t nearLargestOffset =
6343     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6344   if (PrintFLSStatistics != 0) {
6345     gclog_or_tty->print_cr(
6346       "CMS: Large Block: " PTR_FORMAT ";"
6347       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6348       largestAddr,
6349       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6350   }
6351   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6352 }
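A worked example of the proximity arithmetic above, with invented values:

    // Illustrative numbers only, mirroring the computation above.
    size_t largestOffset      = 800 * M;   // largestAddr - minAddr, in words
    double nearLargestPercent = 0.90;      // invented value for this sketch
    size_t nearLargestOffset  =
        (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
    // The boundary lands 10% of the offset (minus one minimum-size
    // chunk) below the largest block; isNearLargestChunk() is then true
    // for any address at or above minAddr + nearLargestOffset.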
6353 
6354 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6355   return addr >= _cmsSpace->nearLargestChunk();
6356 }
6357 
6358 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6359   return _cmsSpace->find_chunk_at_end();
6360 }
6361 
6362 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
6363                                                     bool full) {
6364   // If the young generation has been collected, gather any statistics
6365   // that are of interest at this point.
6366   bool current_is_young = (current_generation == GenCollectedHeap::heap()->young_gen());
6367   if (!full && current_is_young) {
6368     // Gather statistics on the young generation collection.
6369     collector()->stats().record_gc0_end(used());
6370   }
6371 }
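This function shows the essence of the remove_levels change: a question that used to be asked with level arithmetic is now asked by comparing generation identities. A before/after sketch:

    // Old shape (level arithmetic): "did the generation one level below
    // us just get collected?"
    //   if (!full && (current_level + 1) == level()) { ... }
    // New shape (identity test): ask the heap directly whether the
    // collected generation is the young generation.
    bool current_is_young =
        (current_generation == GenCollectedHeap::heap()->young_gen());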
6372 
6373 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6374   if (PrintGCDetails && Verbose) {
6375     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6376   }
6377   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6378   _debug_collection_type =
6379     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6380   if (PrintGCDetails && Verbose) {
6381     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6382   }
6383 }
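rotate_debug_collection_type cycles through the CollectionTypes values by incrementing and wrapping at the sentinel. A compact sketch of the idiom, assuming Unknown_collection_type is the enum's one-past-the-end sentinel:

    // Enum cycling with a trailing sentinel: adding 1 and reducing
    // modulo the sentinel wraps the last real value back to the first.
    _debug_collection_type =
        (CollectionTypes)((_debug_collection_type + 1) % Unknown_collection_type);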
6384 
6385 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6386   bool asynch) {
6387   // We iterate over the space(s) underlying this generation,

