rev 2585 : [mq]: g1-reference-processing

          --- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp
          +++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp
... 842 lines elided ...
 843  843    // force an overflow during remark we'll never actually complete the
 844  844    // marking phase. So, we initialize this here, at the start of the
 845  845    // cycle, so that the remaining overflow number will decrease at
 846  846    // every remark and we'll eventually not need to cause one.
 847  847    force_overflow_stw()->init();
 848  848  
 849  849    // For each region note start of marking.
 850  850    NoteStartOfMarkHRClosure startcl;
 851  851    g1h->heap_region_iterate(&startcl);
 852  852  
 853      -  // Start weak-reference discovery.
 854      -  ReferenceProcessor* rp = g1h->ref_processor();
      853 +  // Start Concurrent Marking weak-reference discovery.
      854 +  ReferenceProcessor* rp = g1h->ref_processor_cm();
      855 +  assert(!rp->discovery_enabled(), "Precondition");
 855  856    rp->verify_no_references_recorded();
 856  857    rp->enable_discovery(); // enable ("weak") refs discovery
 857  858    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 858  859  
 859  860    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 860  861    // This is the start of the marking cycle; we expect all
 861  862    // threads to have SATB queues with active set to false.
 862  863    satb_mq_set.set_active_all_threads(true, /* new active value */
 863  864                                       false /* expected_active */);
 864  865  
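For context on the new precondition assert a few lines up: discovery must be off, and the discovered lists empty, before the cycle turns it on; it then stays on until reference processing at remark disables it. A minimal standalone sketch of that life cycle (illustrative names, not HotSpot's ReferenceProcessor):

    #include <cassert>

    // Toy stand-in for the discovery life cycle asserted above:
    // enable_discovery() is only legal when discovery is off and no
    // references are still recorded from a previous cycle.
    class DiscoverySketch {
      bool _enabled = false;
      int  _recorded = 0;
    public:
      bool discovery_enabled() const { return _enabled; }
      void verify_no_references_recorded() const { assert(_recorded == 0); }
      void enable_discovery() {
        assert(!_enabled && "precondition: discovery off");
        _enabled = true;
      }
    };

    int main() {
      DiscoverySketch rp;
      assert(!rp.discovery_enabled());   // the new precondition assert
      rp.verify_no_references_recorded();
      rp.enable_discovery();             // stays on for the whole cycle
      return 0;
    }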
... 337 lines elided ...
1202 1203    } else {
1203 1204      markingTask.work(0);
1204 1205    }
1205 1206    print_stats();
1206 1207  }
1207 1208  
1208 1209  void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1209 1210    // world is stopped at this checkpoint
1210 1211    assert(SafepointSynchronize::is_at_safepoint(),
1211 1212           "world should be stopped");
     1213 +
1212 1214    G1CollectedHeap* g1h = G1CollectedHeap::heap();
1213 1215  
1214 1216    // If a full collection has happened, we shouldn't do this.
1215 1217    if (has_aborted()) {
1216 1218      g1h->set_marking_complete(); // So bitmap clearing isn't confused
1217 1219      return;
1218 1220    }
1219 1221  
1220 1222    SvcGCMarker sgcm(SvcGCMarker::OTHER);
1221 1223  
... 681 lines elided ...
1903 1905    if (PrintGC || PrintGCDetails) {
1904 1906      g1h->print_size_transition(gclog_or_tty,
1905 1907                                 start_used_bytes,
1906 1908                                 g1h->used(),
1907 1909                                 g1h->capacity());
1908 1910    }
1909 1911  
1910 1912    size_t cleaned_up_bytes = start_used_bytes - g1h->used();
1911 1913    g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
1912 1914  
     1915 +  // Clean up will have freed any regions completely full of garbage.
     1916 +  // Update the soft reference policy with the new heap occupancy.
     1917 +  Universe::update_heap_info_at_gc();
     1918 +
1913 1919    // We need to make this be a "collection" so any collection pause that
1914 1920    // races with it goes around and waits for completeCleanup to finish.
1915 1921    g1h->increment_total_collections();
1916 1922  
1917 1923    if (VerifyDuringGC) {
1918 1924      HandleMark hm;  // handle scope
1919 1925      gclog_or_tty->print(" VerifyDuringGC:(after)");
1920 1926      Universe::heap()->prepare_for_verify();
1921 1927      Universe::verify(/* allow dirty */ true,
1922 1928                       /* silent      */ false,
... 215 lines elided ...
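The new Universe::update_heap_info_at_gc() call in the cleanup hunk above matters because the default soft-reference clearing policy scales with free heap: a SoftReference is cleared only if it has been idle longer than roughly free-heap-in-MB times SoftRefLRUPolicyMSPerMB milliseconds, so freeing whole-garbage regions makes the policy less eager to clear. A hedged, standalone sketch of that rule (the helper is illustrative, not a HotSpot API):

    #include <cstdint>
    #include <cstdio>

    // Illustrative LRU soft-ref rule: after cleanup frees whole-garbage
    // regions, free_heap_mb grows, so references get a longer grace
    // period before being cleared.
    bool should_clear(uint64_t idle_ms, uint64_t free_heap_mb,
                      uint64_t ms_per_mb /* SoftRefLRUPolicyMSPerMB */) {
      return idle_ms > free_heap_mb * ms_per_mb;
    }

    int main() {
      std::printf("%d\n", should_clear(600000, 512, 1000)); // 1: idle too long
      std::printf("%d\n", should_clear(100000, 512, 1000)); // 0: kept
      return 0;
    }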
2138 2144        // CMTask::do_marking_step() returns without setting the has_aborted() flag
2139 2145        // that the marking has completed.
2140 2146  
2141 2147        _task->do_marking_step(1000000000.0 /* something very large */,
2142 2148                               true /* do_stealing    */,
2143 2149                               true /* do_termination */);
2144 2150      } while (_task->has_aborted() && !_cm->has_overflown());
2145 2151    }
2146 2152  };
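The do/while above encodes a small protocol: retry do_marking_step() after a local abort, but stop retrying once the global mark stack has overflown, since the remark pass itself will be restarted. A self-contained sketch of the same loop shape (toy types, not CMTask/ConcurrentMark):

    #include <cstdio>

    // Toy stand-ins: the step aborts a couple of times before completing,
    // and a global overflow would end the retries early.
    struct FakeTask {
      int  aborts_left;
      bool aborted = false;
      void do_marking_step() { aborted = (aborts_left-- > 0); }
      bool has_aborted() const { return aborted; }
    };
    struct FakeCM { bool has_overflown() const { return false; } };

    int main() {
      FakeTask task{2};
      FakeCM   cm;
      int iterations = 0;
      do {
        task.do_marking_step();
        iterations++;
      } while (task.has_aborted() && !cm.has_overflown());
      std::printf("completed after %d steps\n", iterations);  // 3
      return 0;
    }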
2147 2153  
2148      -// Implementation of AbstractRefProcTaskExecutor for G1
2149      -class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
     2154 +// Implementation of AbstractRefProcTaskExecutor for parallel
     2155 +// reference processing at the end of G1 concurrent marking
     2156 +
     2157 +class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2150 2158  private:
2151 2159    G1CollectedHeap* _g1h;
2152 2160    ConcurrentMark*  _cm;
2153 2161    CMBitMap*        _bitmap;
2154 2162    WorkGang*        _workers;
2155 2163    int              _active_workers;
2156 2164  
2157 2165  public:
2158      -  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
     2166 +  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2159 2167                          ConcurrentMark* cm,
2160 2168                          CMBitMap* bitmap,
2161 2169                          WorkGang* workers,
2162 2170                          int n_workers) :
2163 2171      _g1h(g1h), _cm(cm), _bitmap(bitmap),
2164 2172      _workers(workers), _active_workers(n_workers)
2165 2173    { }
2166 2174  
2167 2175    // Executes the given task using concurrent marking worker threads.
2168 2176    virtual void execute(ProcessTask& task);
2169 2177    virtual void execute(EnqueueTask& task);
2170 2178  };
2171 2179  
2172      -class G1RefProcTaskProxy: public AbstractGangTask {
     2180 +class G1CMRefProcTaskProxy: public AbstractGangTask {
2173 2181    typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2174 2182    ProcessTask&     _proc_task;
2175 2183    G1CollectedHeap* _g1h;
2176 2184    ConcurrentMark*  _cm;
2177 2185    CMBitMap*        _bitmap;
2178 2186  
2179 2187  public:
2180      -  G1RefProcTaskProxy(ProcessTask& proc_task,
     2188 +  G1CMRefProcTaskProxy(ProcessTask& proc_task,
2181 2189                       G1CollectedHeap* g1h,
2182 2190                       ConcurrentMark* cm,
2183 2191                       CMBitMap* bitmap) :
2184 2192      AbstractGangTask("Process reference objects in parallel"),
2185 2193      _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
2186 2194    {}
2187 2195  
2188 2196    virtual void work(int i) {
2189 2197      CMTask* marking_task = _cm->task(i);
2190 2198      G1CMIsAliveClosure g1_is_alive(_g1h);
2191 2199      G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
2192 2200      G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
2193 2201  
2194 2202      _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2195 2203    }
2196 2204  };
2197 2205  
2198      -void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
     2206 +void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2199 2207    assert(_workers != NULL, "Need parallel worker threads.");
2200 2208  
2201      -  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
     2209 +  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
2202 2210  
2203 2211    // We need to reset the phase for each task execution so that
2204 2212    // the termination protocol of CMTask::do_marking_step works.
2205 2213    _cm->set_phase(_active_workers, false /* concurrent */);
2206 2214    _g1h->set_par_threads(_active_workers);
2207 2215    _workers->run_task(&proc_task_proxy);
2208 2216    _g1h->set_par_threads(0);
2209 2217  }
2210 2218  
2211      -class G1RefEnqueueTaskProxy: public AbstractGangTask {
     2219 +class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2212 2220    typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2213 2221    EnqueueTask& _enq_task;
2214 2222  
2215 2223  public:
2216      -  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
     2224 +  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2217 2225      AbstractGangTask("Enqueue reference objects in parallel"),
2218 2226      _enq_task(enq_task)
2219 2227    { }
2220 2228  
2221 2229    virtual void work(int i) {
2222 2230      _enq_task.work(i);
2223 2231    }
2224 2232  };
2225 2233  
2226      -void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
     2234 +void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2227 2235    assert(_workers != NULL, "Need parallel worker threads.");
2228 2236  
2229      -  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
     2237 +  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2230 2238  
2231 2239    _g1h->set_par_threads(_active_workers);
2232 2240    _workers->run_task(&enq_task_proxy);
2233 2241    _g1h->set_par_threads(0);
2234 2242  }
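The renamed executor and proxies above all follow one adapter pattern: wrap a reference-processing task in an AbstractGangTask so the work gang can run it, with work(i) forwarding to the wrapped task for worker i. A compact standalone analogue, with std::thread standing in for the gang and all names illustrative:

    #include <cstdio>
    #include <thread>
    #include <vector>

    // Minimal gang-task interface and a thread-per-worker "gang".
    struct GangTask { virtual void work(int worker_id) = 0; virtual ~GangTask() {} };

    struct WorkGang {
      int n_workers;
      void run_task(GangTask* t) {
        std::vector<std::thread> threads;
        for (int i = 0; i < n_workers; i++)
          threads.emplace_back([t, i] { t->work(i); });
        for (std::thread& th : threads) th.join();
      }
    };

    // Stand-in for AbstractRefProcTaskExecutor::ProcessTask.
    struct ProcessTask { virtual void work(int worker_id) = 0; virtual ~ProcessTask() {} };

    // The proxy: gives a ProcessTask the GangTask face the gang expects,
    // mirroring G1CMRefProcTaskProxy above.
    struct RefProcTaskProxy : GangTask {
      ProcessTask& _proc_task;
      explicit RefProcTaskProxy(ProcessTask& t) : _proc_task(t) {}
      void work(int worker_id) override { _proc_task.work(worker_id); }
    };

    struct PrintRefsTask : ProcessTask {
      void work(int worker_id) override {
        std::printf("worker %d processes its discovered list\n", worker_id);
      }
    };

    int main() {
      WorkGang gang{4};
      PrintRefsTask task;
      RefProcTaskProxy proxy(task);
      gang.run_task(&proxy);   // cf. _workers->run_task(&proc_task_proxy)
      return 0;
    }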
2235 2243  
2236 2244  void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2237 2245    ResourceMark rm;
2238 2246    HandleMark   hm;
2239 2247    G1CollectedHeap* g1h   = G1CollectedHeap::heap();
2240      -  ReferenceProcessor* rp = g1h->ref_processor();
     2248 +  ReferenceProcessor* rp = g1h->ref_processor_cm();
2241 2249  
2242 2250    // See the comment in G1CollectedHeap::ref_processing_init()
2243 2251    // about how reference processing currently works in G1.
2244 2252  
2245 2253    // Process weak references.
2246 2254    rp->setup_policy(clear_all_soft_refs);
2247 2255    assert(_markStack.isEmpty(), "mark stack should be empty");
2248 2256  
2249 2257    G1CMIsAliveClosure   g1_is_alive(g1h);
2250 2258    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
2251 2259    G1CMDrainMarkingStackClosure
2252 2260      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
2253 2261    // We use the work gang from the G1CollectedHeap and we utilize all
2254 2262    // the worker threads.
2255 2263    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
2256 2264    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
2257 2265  
2258      -  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
     2266 +  G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
2259 2267                                            g1h->workers(), active_workers);
2260 2268  
2261 2269  
2262 2270    if (rp->processing_is_mt()) {
2263 2271      // Set the degree of MT here.  If the discovery is done MT, there
2264 2272      // may have been a different number of threads doing the discovery
2265 2273      // and a different number of discovered lists may have Ref objects.
2266 2274      // That is OK as long as the Reference lists are balanced (see
2267 2275      // balance_all_queues() and balance_queues()).
2268 2276      rp->set_active_mt_degree(active_workers);
... 23 lines elided ...
2292 2300    }
2293 2301  
2294 2302    if (rp->processing_is_mt()) {
2295 2303      assert(rp->num_q() == active_workers, "why not");
2296 2304      rp->enqueue_discovered_references(&par_task_executor);
2297 2305    } else {
2298 2306      rp->enqueue_discovered_references();
2299 2307    }
2300 2308  
2301 2309    rp->verify_no_references_recorded();
2302      -  assert(!rp->discovery_enabled(), "should have been disabled");
     2310 +  assert(!rp->discovery_enabled(), "Post condition");
2303 2311  
2304 2312    // Now clean up stale oops in StringTable
2305 2313    StringTable::unlink(&g1_is_alive);
2306 2314    // Clean up unreferenced symbols in symbol table.
2307 2315    SymbolTable::unlink();
2308 2316  }
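weakRefsWork() branches the same way twice: when the processor is configured for MT processing, the parallel task executor is handed to the process and enqueue steps; otherwise they run serially on the calling thread. A sketch of just that control flow, under the assumption (hedged; the actual call sits in the elided lines) that a null executor means "serial", with toy types throughout:

    #include <cstdio>

    struct Executor { };  // would wrap the work gang, as in the proxy sketch

    struct RefProcessorSketch {
      bool mt;
      bool processing_is_mt() const { return mt; }
      void process_discovered_references(Executor* ex) {
        std::printf("process: %s\n", ex ? "parallel via executor" : "serial");
      }
      void enqueue_discovered_references(Executor* ex) {
        std::printf("enqueue: %s\n", ex ? "parallel via executor" : "serial");
      }
    };

    int main() {
      RefProcessorSketch rp{true};
      Executor par_task_executor;
      Executor* ex = rp.processing_is_mt() ? &par_task_executor : nullptr;
      rp.process_discovered_references(ex);
      rp.enqueue_discovered_references(ex);
      return 0;
    }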
2309 2317  
2310 2318  void ConcurrentMark::swapMarkBitMaps() {
2311 2319    CMBitMapRO* temp = _prevMarkBitMap;
2312 2320    _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
... 1082 lines elided ...
3395 3403    CMObjectClosure(CMTask* task) : _task(task) { }
3396 3404  };
3397 3405  
3398 3406  G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3399 3407                                 ConcurrentMark* cm,
3400 3408                                 CMTask* task)
3401 3409    : _g1h(g1h), _cm(cm), _task(task) {
3402 3410    assert(_ref_processor == NULL, "should be initialized to NULL");
3403 3411  
3404 3412    if (G1UseConcMarkReferenceProcessing) {
3405      -    _ref_processor = g1h->ref_processor();
     3413 +    _ref_processor = g1h->ref_processor_cm();
3406 3414      assert(_ref_processor != NULL, "should not be NULL");
3407 3415    }
3408 3416  }
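The constructor change above wires the concurrent-marking reference processor into the marking oop closure only when G1UseConcMarkReferenceProcessing is set; with the flag off, _ref_processor stays NULL and field iteration does no discovery. A toy version of that conditional wiring (illustrative names, not the real closure hierarchy):

    #include <cstdio>

    struct RefProcessorSketch {
      void discover(const char* obj) { std::printf("discovered %s\n", obj); }
    };

    // When enabled, each visited field is also offered for discovery;
    // when disabled, the closure only marks.
    struct MarkOopClosure {
      RefProcessorSketch* _ref_processor;  // NULL unless discovery is wired in
      explicit MarkOopClosure(RefProcessorSketch* rp) : _ref_processor(rp) {}
      void do_oop(const char* obj) {
        std::printf("marked %s\n", obj);
        if (_ref_processor != nullptr) _ref_processor->discover(obj);
      }
    };

    int main() {
      bool use_conc_mark_refs = true;  // G1UseConcMarkReferenceProcessing stand-in
      RefProcessorSketch rp;
      MarkOopClosure cl(use_conc_mark_refs ? &rp : nullptr);
      cl.do_oop("java.lang.ref.WeakReference@7a");
      return 0;
    }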
3409 3417  
3410 3418  void CMTask::setup_for_region(HeapRegion* hr) {
3411 3419    // Separated the asserts so that we know which one fires.
3412 3420    assert(hr != NULL,
3413 3421          "claim_region() should have filtered out continues humongous regions");
3414 3422    assert(!hr->continuesHumongous(),
3415 3423          "claim_region() should have filtered out continues humongous regions");
... 1331 lines elided ...