1258 gclog_or_tty->print(" VerifyDuringGC:(before)");
1259 Universe::heap()->prepare_for_verify();
1260 Universe::verify(/* silent */ false,
1261 /* option */ VerifyOption_G1UsePrevMarking);
1262 }
1263
1264 G1CollectorPolicy* g1p = g1h->g1_policy();
1265 g1p->record_concurrent_mark_remark_start();
1266
1267 double start = os::elapsedTime();
1268
1269 checkpointRootsFinalWork();
1270
1271 double mark_work_end = os::elapsedTime();
1272
1273 weakRefsWork(clear_all_soft_refs);
1274
1275 if (has_overflown()) {
1276 // Oops. We overflowed. Restart concurrent marking.
1277 _restart_for_overflow = true;
1278 // Clear the marking state because we will be restarting
1279 // marking due to overflowing the global mark stack.
1280 reset_marking_state();
1281 if (G1TraceMarkStackOverflow) {
1282 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1283 }
1284 } else {
1285 // Aggregate the per-task counting data that we have accumulated
1286 // while marking.
1287 aggregate_count_data();
1288
1289 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1290 // We're done with marking.
// This is the end of the marking cycle, and we expect all
// threads to have SATB queues with active set to true.
1293 satb_mq_set.set_active_all_threads(false, /* new active value */
1294 true /* expected_active */);
1295
1296 if (VerifyDuringGC) {
1297 HandleMark hm; // handle scope
1298 gclog_or_tty->print(" VerifyDuringGC:(after)");
1299 Universe::heap()->prepare_for_verify();
1300 Universe::verify(/* silent */ false,
1301 /* option */ VerifyOption_G1UseNextMarking);
1302 }
1303 assert(!restart_for_overflow(), "sanity");
2378 AbstractGangTask("Enqueue reference objects in parallel"),
2379 _enq_task(enq_task) { }
2380
2381 virtual void work(uint worker_id) {
2382 _enq_task.work(worker_id);
2383 }
2384 };
2385
2386 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2387 assert(_workers != NULL, "Need parallel worker threads.");
2388 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2389
2390 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2391
2392 _g1h->set_par_threads(_active_workers);
2393 _workers->run_task(&enq_task_proxy);
2394 _g1h->set_par_threads(0);
2395 }
2396
2397 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2398 ResourceMark rm;
2399 HandleMark hm;
2400
2401 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2402
2403 // Is alive closure.
2404 G1CMIsAliveClosure g1_is_alive(g1h);
2405
2406 // Inner scope to exclude the cleaning of the string and symbol
2407 // tables from the displayed time.
2408 {
2409 if (G1Log::finer()) {
2410 gclog_or_tty->put(' ');
2411 }
2412 TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2413
2414 ReferenceProcessor* rp = g1h->ref_processor_cm();
2415
2416 // See the comment in G1CollectedHeap::ref_processing_init()
2417 // about how reference processing currently works in G1.
2535 // constructor and pass values of the active workers
2536 // through the gang in the task.
2537
2538 CMRemarkTask remarkTask(this, active_workers);
2539 g1h->set_par_threads(active_workers);
2540 g1h->workers()->run_task(&remarkTask);
2541 g1h->set_par_threads(0);
2542 } else {
2543 G1CollectedHeap::StrongRootsScope srs(g1h);
2544 // this is remark, so we'll use up all available threads
2545 uint active_workers = 1;
2546 set_phase(active_workers, false /* concurrent */);
2547
2548 CMRemarkTask remarkTask(this, active_workers);
2549 // We will start all available threads, even if we decide that the
2550 // active_workers will be fewer. The extra ones will just bail out
2551 // immediately.
2552 remarkTask.work(0);
2553 }
2554 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2555 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
2556
2557 print_stats();
2558
2559 #if VERIFY_OBJS_PROCESSED
2560 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2561 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2562 _scan_obj_cl.objs_processed,
2563 ThreadLocalObjQueue::objs_enqueued);
2564 guarantee(_scan_obj_cl.objs_processed ==
2565 ThreadLocalObjQueue::objs_enqueued,
2566 "Different number of objs processed and enqueued.");
2567 }
2568 #endif
2569 }
2570
2571 #ifndef PRODUCT
2572
2573 class PrintReachableOopClosure: public OopClosure {
2574 private:
2575 G1CollectedHeap* _g1h;
|
1258 gclog_or_tty->print(" VerifyDuringGC:(before)");
1259 Universe::heap()->prepare_for_verify();
1260 Universe::verify(/* silent */ false,
1261 /* option */ VerifyOption_G1UsePrevMarking);
1262 }
1263
1264 G1CollectorPolicy* g1p = g1h->g1_policy();
1265 g1p->record_concurrent_mark_remark_start();
1266
1267 double start = os::elapsedTime();
1268
1269 checkpointRootsFinalWork();
1270
1271 double mark_work_end = os::elapsedTime();
1272
1273 weakRefsWork(clear_all_soft_refs);
1274
1275 if (has_overflown()) {
1276 // Oops. We overflowed. Restart concurrent marking.
1277 _restart_for_overflow = true;
1278 if (G1TraceMarkStackOverflow) {
1279 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1280 }
1281
1282 // Verify the heap w.r.t. the previous marking bitmap.
1283 if (VerifyDuringGC) {
1284 HandleMark hm; // handle scope
1285 gclog_or_tty->print(" VerifyDuringGC:(overflow)");
1286 Universe::heap()->prepare_for_verify();
1287 Universe::verify(/* silent */ false,
1288 /* option */ VerifyOption_G1UsePrevMarking);
1289 }
1290
1291 // Clear the marking state because we will be restarting
1292 // marking due to overflowing the global mark stack.
1293 reset_marking_state();
1294 } else {
1295 // Aggregate the per-task counting data that we have accumulated
1296 // while marking.
1297 aggregate_count_data();
1298
1299 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1300 // We're done with marking.
// This is the end of the marking cycle, and we expect all
// threads to have SATB queues with active set to true.
1303 satb_mq_set.set_active_all_threads(false, /* new active value */
1304 true /* expected_active */);
1305
1306 if (VerifyDuringGC) {
1307 HandleMark hm; // handle scope
1308 gclog_or_tty->print(" VerifyDuringGC:(after)");
1309 Universe::heap()->prepare_for_verify();
1310 Universe::verify(/* silent */ false,
1311 /* option */ VerifyOption_G1UseNextMarking);
1312 }
1313 assert(!restart_for_overflow(), "sanity");
2388 AbstractGangTask("Enqueue reference objects in parallel"),
2389 _enq_task(enq_task) { }
2390
2391 virtual void work(uint worker_id) {
2392 _enq_task.work(worker_id);
2393 }
2394 };
2395
2396 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2397 assert(_workers != NULL, "Need parallel worker threads.");
2398 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2399
2400 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2401
2402 _g1h->set_par_threads(_active_workers);
2403 _workers->run_task(&enq_task_proxy);
2404 _g1h->set_par_threads(0);
2405 }
2406
2407 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2408 if (has_overflown()) {
2409 // If we have overflown the marking stack then just return
2410 // without processing the discovered references. We will be
2411 // restarting marking because of the overflow and any
2412 // currently discovered reference will stay discovered.
2413 // They will be processed when the remark task successfully
2414 // completes.
2415 return;
2416 }
2417
2418 ResourceMark rm;
2419 HandleMark hm;
2420
2421 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2422
2423 // Is alive closure.
2424 G1CMIsAliveClosure g1_is_alive(g1h);
2425
2426 // Inner scope to exclude the cleaning of the string and symbol
2427 // tables from the displayed time.
2428 {
2429 if (G1Log::finer()) {
2430 gclog_or_tty->put(' ');
2431 }
2432 TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2433
2434 ReferenceProcessor* rp = g1h->ref_processor_cm();
2435
2436 // See the comment in G1CollectedHeap::ref_processing_init()
2437 // about how reference processing currently works in G1.
2555 // constructor and pass values of the active workers
2556 // through the gang in the task.
2557
2558 CMRemarkTask remarkTask(this, active_workers);
2559 g1h->set_par_threads(active_workers);
2560 g1h->workers()->run_task(&remarkTask);
2561 g1h->set_par_threads(0);
2562 } else {
2563 G1CollectedHeap::StrongRootsScope srs(g1h);
2564 // this is remark, so we'll use up all available threads
2565 uint active_workers = 1;
2566 set_phase(active_workers, false /* concurrent */);
2567
2568 CMRemarkTask remarkTask(this, active_workers);
2569 // We will start all available threads, even if we decide that the
2570 // active_workers will be fewer. The extra ones will just bail out
2571 // immediately.
2572 remarkTask.work(0);
2573 }
2574 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2575 guarantee(has_overflown() ||
2576 satb_mq_set.completed_buffers_num() == 0,
2577 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2578 BOOL_TO_STR(has_overflown()),
2579 satb_mq_set.completed_buffers_num()));
2580
2581 print_stats();
2582
2583 #if VERIFY_OBJS_PROCESSED
2584 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2585 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2586 _scan_obj_cl.objs_processed,
2587 ThreadLocalObjQueue::objs_enqueued);
2588 guarantee(_scan_obj_cl.objs_processed ==
2589 ThreadLocalObjQueue::objs_enqueued,
2590 "Different number of objs processed and enqueued.");
2591 }
2592 #endif
2593 }
2594
2595 #ifndef PRODUCT
2596
2597 class PrintReachableOopClosure: public OopClosure {
2598 private:
2599 G1CollectedHeap* _g1h;
|