rev 2691 : [mq]: g1-reference-processing

          --- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp
          +++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp
[... 810 lines elided ...]
 811  811    // force an overflow during remark we'll never actually complete the
 812  812    // marking phase. So, we initialize this here, at the start of the
 813  813    // cycle, so that the remaining overflow number will decrease at
 814  814    // every remark and we'll eventually not need to cause one.
 815  815    force_overflow_stw()->init();
 816  816  
 817  817    // For each region note start of marking.
 818  818    NoteStartOfMarkHRClosure startcl;
 819  819    g1h->heap_region_iterate(&startcl);
 820  820  
 821      -  // Start weak-reference discovery.
 822      -  ReferenceProcessor* rp = g1h->ref_processor();
 823      -  rp->verify_no_references_recorded();
 824      -  rp->enable_discovery(); // enable ("weak") refs discovery
      821 +  // Start Concurrent Marking weak-reference discovery.
      822 +  ReferenceProcessor* rp = g1h->ref_processor_cm();
      823 +  // enable ("weak") refs discovery
      824 +  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
 825  825    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 826  826  
 827  827    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 828  828    // This is the start of the marking cycle, we expect all
 829  829    // threads to have SATB queues with active set to false.
 830  830    satb_mq_set.set_active_all_threads(true, /* new active value */
 831  831                                       false /* expected_active */);
 832  832  
 833  833    // update_g1_committed() will be called at the end of an evac pause
 834  834    // when marking is on. So, it's also called at the end of the
[... 291 lines elided ...]
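
The hunk at lines 821-824 above makes two related changes: ref_processor() becomes ref_processor_cm(), and the old standalone verify_no_references_recorded() call is folded into a new two-flag enable_discovery(verify_disabled, verify_no_refs) overload. The renamed accessor implies that G1 now keeps separate ReferenceProcessor instances for concurrent marking and for stop-the-world pauses. A minimal sketch of the assumed shape; the STW counterpart is inferred from the "_cm" suffix and is not shown in this excerpt:

    class G1CollectedHeap /* ... */ {
      ReferenceProcessor* _ref_processor_cm;   // discovers refs during concurrent marking
      ReferenceProcessor* _ref_processor_stw;  // assumed: discovers refs during STW pauses
    public:
      ReferenceProcessor* ref_processor_cm()  { return _ref_processor_cm; }
      ReferenceProcessor* ref_processor_stw() { return _ref_processor_stw; }
    };
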
1126 1126    } else {
1127 1127      markingTask.work(0);
1128 1128    }
1129 1129    print_stats();
1130 1130  }
1131 1131  
1132 1132  void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1133 1133    // world is stopped at this checkpoint
1134 1134    assert(SafepointSynchronize::is_at_safepoint(),
1135 1135           "world should be stopped");
     1136 +
1136 1137    G1CollectedHeap* g1h = G1CollectedHeap::heap();
1137 1138  
1138 1139    // If a full collection has happened, we shouldn't do this.
1139 1140    if (has_aborted()) {
1140 1141      g1h->set_marking_complete(); // So bitmap clearing isn't confused
1141 1142      return;
1142 1143    }
1143 1144  
1144 1145    SvcGCMarker sgcm(SvcGCMarker::OTHER);
1145 1146  
[... 684 lines elided ...]
1830 1831    if (PrintGC || PrintGCDetails) {
1831 1832      g1h->print_size_transition(gclog_or_tty,
1832 1833                                 start_used_bytes,
1833 1834                                 g1h->used(),
1834 1835                                 g1h->capacity());
1835 1836    }
1836 1837  
1837 1838    size_t cleaned_up_bytes = start_used_bytes - g1h->used();
1838 1839    g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
1839 1840  
     1841 +  // Clean up will have freed any regions completely full of garbage.
     1842 +  // Update the soft reference policy with the new heap occupancy.
     1843 +  Universe::update_heap_info_at_gc();
     1844 +
1840 1845    // We need to make this be a "collection" so any collection pause that
1841 1846    // races with it goes around and waits for completeCleanup to finish.
1842 1847    g1h->increment_total_collections();
1843 1848  
1844 1849    if (VerifyDuringGC) {
1845 1850      HandleMark hm;  // handle scope
1846 1851      gclog_or_tty->print(" VerifyDuringGC:(after)");
1847 1852      Universe::heap()->prepare_for_verify();
1848 1853      Universe::verify(/* allow dirty */ true,
1849 1854                       /* silent      */ false,
[... 215 lines elided ...]
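
The Universe::update_heap_info_at_gc() call added at lines 1841-1843 matters because the SoftReference clearing policy is driven by heap occupancy figures cached at GC time; without refreshing them after cleanup frees garbage-only regions, the next setup_policy() would compute soft-reference lifetimes from stale, inflated occupancy. A from-memory sketch of what that call does (field names recalled from universe.cpp of this era; treat them as assumptions):

    void Universe::update_heap_info_at_gc() {
      _heap_capacity_at_last_gc = heap()->capacity();
      _heap_used_at_last_gc     = heap()->used();
    }
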
2065 2070        // CMTask::do_marking_step() returns without setting the has_aborted() flag
2066 2071        // that the marking has completed.
2067 2072  
2068 2073        _task->do_marking_step(1000000000.0 /* something very large */,
2069 2074                               true /* do_stealing    */,
2070 2075                               true /* do_termination */);
2071 2076      } while (_task->has_aborted() && !_cm->has_overflown());
2072 2077    }
2073 2078  };
2074 2079  
2075      -// Implementation of AbstractRefProcTaskExecutor for G1
2076      -class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
     2080 +// Implementation of AbstractRefProcTaskExecutor for parallel
     2081 +// reference processing at the end of G1 concurrent marking
     2082 +
     2083 +class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2077 2084  private:
2078 2085    G1CollectedHeap* _g1h;
2079 2086    ConcurrentMark*  _cm;
2080 2087    CMBitMap*        _bitmap;
2081 2088    WorkGang*        _workers;
2082 2089    int              _active_workers;
2083 2090  
2084 2091  public:
2085      -  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
     2092 +  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2086 2093                          ConcurrentMark* cm,
2087 2094                          CMBitMap* bitmap,
2088 2095                          WorkGang* workers,
2089 2096                          int n_workers) :
2090 2097      _g1h(g1h), _cm(cm), _bitmap(bitmap),
2091 2098      _workers(workers), _active_workers(n_workers)
2092 2099    { }
2093 2100  
2094 2101    // Executes the given task using concurrent marking worker threads.
2095 2102    virtual void execute(ProcessTask& task);
2096 2103    virtual void execute(EnqueueTask& task);
2097 2104  };
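
For context, the approximate shape of the interface the renamed executor implements, as declared in referenceProcessor.hpp (a from-memory sketch, not a verbatim copy):

    class AbstractRefProcTaskExecutor {
    public:
      class ProcessTask;   // per-worker reference-processing work
      class EnqueueTask;   // per-worker pending-list enqueue work

      // Subclasses decide how to fan these tasks out to worker threads.
      virtual void execute(ProcessTask& task) = 0;
      virtual void execute(EnqueueTask& task) = 0;
    };

The "CM" added to the class names presumably makes room for a second, stop-the-world executor elsewhere in this patch without the two colliding.
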
2098 2105  
2099      -class G1RefProcTaskProxy: public AbstractGangTask {
     2106 +class G1CMRefProcTaskProxy: public AbstractGangTask {
2100 2107    typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2101 2108    ProcessTask&     _proc_task;
2102 2109    G1CollectedHeap* _g1h;
2103 2110    ConcurrentMark*  _cm;
2104 2111    CMBitMap*        _bitmap;
2105 2112  
2106 2113  public:
2107      -  G1RefProcTaskProxy(ProcessTask& proc_task,
     2114 +  G1CMRefProcTaskProxy(ProcessTask& proc_task,
2108 2115                       G1CollectedHeap* g1h,
2109 2116                       ConcurrentMark* cm,
2110 2117                       CMBitMap* bitmap) :
2111 2118      AbstractGangTask("Process reference objects in parallel"),
2112 2119      _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
2113 2120    {}
2114 2121  
2115 2122    virtual void work(int i) {
2116 2123      CMTask* marking_task = _cm->task(i);
2117 2124      G1CMIsAliveClosure g1_is_alive(_g1h);
2118 2125      G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
2119 2126      G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
2120 2127  
2121 2128      _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2122 2129    }
2123 2130  };
2124 2131  
2125      -void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
     2132 +void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2126 2133    assert(_workers != NULL, "Need parallel worker threads.");
2127 2134  
2128      -  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
     2135 +  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
2129 2136  
2130 2137    // We need to reset the phase for each task execution so that
2131 2138    // the termination protocol of CMTask::do_marking_step works.
2132 2139    _cm->set_phase(_active_workers, false /* concurrent */);
2133 2140    _g1h->set_par_threads(_active_workers);
2134 2141    _workers->run_task(&proc_task_proxy);
2135 2142    _g1h->set_par_threads(0);
2136 2143  }
2137 2144  
2138      -class G1RefEnqueueTaskProxy: public AbstractGangTask {
     2145 +class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2139 2146    typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2140 2147    EnqueueTask& _enq_task;
2141 2148  
2142 2149  public:
2143      -  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
     2150 +  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2144 2151      AbstractGangTask("Enqueue reference objects in parallel"),
2145 2152      _enq_task(enq_task)
2146 2153    { }
2147 2154  
2148 2155    virtual void work(int i) {
2149 2156      _enq_task.work(i);
2150 2157    }
2151 2158  };
2152 2159  
2153      -void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
     2160 +void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2154 2161    assert(_workers != NULL, "Need parallel worker threads.");
2155 2162  
2156      -  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
     2163 +  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2157 2164  
2158 2165    _g1h->set_par_threads(_active_workers);
2159 2166    _workers->run_task(&enq_task_proxy);
2160 2167    _g1h->set_par_threads(0);
2161 2168  }
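
Taken together, the two proxies above are adapters: the reference processor hands over a task exposing per-worker work(i), and the proxy wraps it in an AbstractGangTask so WorkGang::run_task() can invoke it once per worker. A self-contained toy showing the same fan-out pattern (generic C++11; every name here is invented for illustration, none of it is HotSpot code):

    #include <cstdio>
    #include <thread>
    #include <vector>

    struct PerWorkerTask {                  // stands in for ProcessTask/EnqueueTask
      virtual ~PerWorkerTask() {}
      virtual void work(int worker_id) = 0;
    };

    struct HelloTask : PerWorkerTask {
      virtual void work(int worker_id) { std::printf("worker %d\n", worker_id); }
    };

    // Stands in for WorkGang::run_task(): run task.work(i) once per worker.
    static void run_task(PerWorkerTask& task, int n_workers) {
      std::vector<std::thread> gang;
      for (int i = 0; i < n_workers; ++i) {
        gang.emplace_back([&task, i] { task.work(i); });
      }
      for (std::thread& t : gang) t.join();
    }

    int main() {
      HelloTask hello;
      run_task(hello, 4);   // each worker sees a distinct id, like work(int i) above
      return 0;
    }
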
2162 2169  
2163 2170  void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2164 2171    ResourceMark rm;
2165 2172    HandleMark   hm;
2166 2173    G1CollectedHeap* g1h   = G1CollectedHeap::heap();
2167      -  ReferenceProcessor* rp = g1h->ref_processor();
     2174 +  ReferenceProcessor* rp = g1h->ref_processor_cm();
2168 2175  
2169 2176    // See the comment in G1CollectedHeap::ref_processing_init()
2170 2177    // about how reference processing currently works in G1.
2171 2178  
2172 2179    // Process weak references.
2173 2180    rp->setup_policy(clear_all_soft_refs);
2174 2181    assert(_markStack.isEmpty(), "mark stack should be empty");
2175 2182  
2176 2183    G1CMIsAliveClosure   g1_is_alive(g1h);
2177 2184    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
2178 2185    G1CMDrainMarkingStackClosure
2179 2186      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
2180 2187    // We use the work gang from the G1CollectedHeap and we utilize all
2181 2188    // the worker threads.
2182 2189    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
2183 2190    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
2184 2191  
2185      -  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
     2192 +  G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
2186 2193                                            g1h->workers(), active_workers);
2187 2194  
2188 2195  
2189 2196    if (rp->processing_is_mt()) {
2190 2197      // Set the degree of MT here.  If the discovery is done MT, there
2191 2198      // may have been a different number of threads doing the discovery
2192 2199      // and a different number of discovered lists may have Ref objects.
2193 2200      // That is OK as long as the Reference lists are balanced (see
2194 2201      // balance_all_queues() and balance_queues()).
2195 2202      rp->set_active_mt_degree(active_workers);
[... 23 lines elided ...]
2219 2226    }
2220 2227  
2221 2228    if (rp->processing_is_mt()) {
2222 2229      assert(rp->num_q() == active_workers, "why not");
2223 2230      rp->enqueue_discovered_references(&par_task_executor);
2224 2231    } else {
2225 2232      rp->enqueue_discovered_references();
2226 2233    }
2227 2234  
2228 2235    rp->verify_no_references_recorded();
2229      -  assert(!rp->discovery_enabled(), "should have been disabled");
     2236 +  assert(!rp->discovery_enabled(), "Post condition");
2230 2237  
2231 2238    // Now clean up stale oops in StringTable
2232 2239    StringTable::unlink(&g1_is_alive);
2233 2240    // Clean up unreferenced symbols in symbol table.
2234 2241    SymbolTable::unlink();
2235 2242  }
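
weakRefsWork() closes the discovery cycle opened in checkpointRootsInitial at line 824: discovery is enabled at the start of marking, references are processed and enqueued here at remark, and verify_no_references_recorded() is the postcondition that every discovered list is empty with discovery disabled again. Schematically, using only calls visible in this webrev (the processing call itself sits in the 23 elided lines above and is not reproduced here):

    // rp == g1h->ref_processor_cm()
    rp->enable_discovery(...);                 // checkpointRootsInitial: start of cycle
    rp->setup_policy(false);                   // snapshot soft-ref policy for the cycle
    //   ... concurrent marking discovers references ...
    rp->setup_policy(clear_all_soft_refs);     // weakRefsWork: remark pause
    //   ... process discovered references (elided above) ...
    rp->enqueue_discovered_references(...);    // hand discovered refs to the pending list
    rp->verify_no_references_recorded();       // post: lists empty, discovery off
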
2236 2243  
2237 2244  void ConcurrentMark::swapMarkBitMaps() {
2238 2245    CMBitMapRO* temp = _prevMarkBitMap;
2239 2246    _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
[... 1082 lines elided ...]
3322 3329    CMObjectClosure(CMTask* task) : _task(task) { }
3323 3330  };
3324 3331  
3325 3332  G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3326 3333                                 ConcurrentMark* cm,
3327 3334                                 CMTask* task)
3328 3335    : _g1h(g1h), _cm(cm), _task(task) {
3329 3336    assert(_ref_processor == NULL, "should be initialized to NULL");
3330 3337  
3331 3338    if (G1UseConcMarkReferenceProcessing) {
3332      -    _ref_processor = g1h->ref_processor();
     3339 +    _ref_processor = g1h->ref_processor_cm();
3333 3340      assert(_ref_processor != NULL, "should not be NULL");
3334 3341    }
3335 3342  }
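
Setting _ref_processor on the closure is what routes reference discovery through the concurrent-marking processor: when oop iteration walks a java.lang.ref.Reference instance, it consults the closure's reference processor and offers the object for discovery instead of treating the referent as an ordinary strong field. A hedged recollection of that guard (it lives in instanceRefKlass's iteration code; the exact shape may differ):

    // paraphrased, not verbatim HotSpot code:
    ReferenceProcessor* rp = closure->_ref_processor;
    if (rp != NULL && rp->discover_reference(obj, ref_klass->reference_type())) {
      return;   // the referent will be handled later, during reference processing
    }
    // otherwise fall through and visit the referent like any other field

With G1UseConcMarkReferenceProcessing disabled, _ref_processor stays NULL, so marking treats referents as strong references and keeps them alive.
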
3336 3343  
3337 3344  void CMTask::setup_for_region(HeapRegion* hr) {
3338 3345    // Separated the asserts so that we know which one fires.
3339 3346    assert(hr != NULL,
3340 3347          "claim_region() should have filtered out continues humongous regions");
3341 3348    assert(!hr->continuesHumongous(),
3342 3349          "claim_region() should have filtered out continues humongous regions");
[... 1331 lines elided ...]