rev 2891 : 7120038: G1: ParallelGCThreads==0 is broken
Summary: Running G1 with ParallelGCThreads==0 results in various crashes and asserts. Most of these are caused by unguarded references to the worker threads array or an incorrect number of active workers.
Reviewed-by:
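
Before the hunks, a minimal sketch of the guard pattern the change applies throughout g1CollectedHeap.cpp: consult the worker gang only when parallel GC threads are actually in use, and otherwise fall back to a single active worker. WorkGang, use_parallel_gc_threads() and active_gc_workers() below are simplified, hypothetical stand-ins, not the real HotSpot declarations.

    #include <cstdio>

    // Illustrative stand-in for the worker gang; with ParallelGCThreads == 0
    // the real gang must not be consulted for the active worker count.
    struct WorkGang {
      int _active_workers;
      explicit WorkGang(int n) : _active_workers(n) {}
      int active_workers() const { return _active_workers; }
    };

    // Stand-in for G1CollectedHeap::use_parallel_gc_threads(); returning
    // false models a run with ParallelGCThreads == 0.
    static bool use_parallel_gc_threads() { return false; }

    // The guard used in the hunks below: dereference the gang only on the
    // parallel path, and report exactly one active worker otherwise.
    // The asserts in the patch are guarded the same way, e.g.
    //   assert(!use_parallel_gc_threads() ||
    //          active_workers == workers()->active_workers(), ...);
    int active_gc_workers(WorkGang* workers) {
      return use_parallel_gc_threads() ? workers->active_workers() : 1;
    }

    int main() {
      // With ParallelGCThreads == 0 the gang may not be usable at all;
      // the guard keeps the code from touching it.
      WorkGang* workers = use_parallel_gc_threads() ? new WorkGang(4) : nullptr;
      printf("active workers for this pause: %d\n", active_gc_workers(workers));
      delete workers;  // deleting a null pointer is a no-op
      return 0;
    }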

          --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
          +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
... 3524 lines elided ...
3525 3525                           /* silent      */ false,
3526 3526                           /* option      */ VerifyOption_G1UsePrevMarking);
3527 3527  
3528 3528        }
3529 3529  
3530 3530        COMPILER2_PRESENT(DerivedPointerTable::clear());
3531 3531  
3532 3532        // Please see comment in g1CollectedHeap.hpp and
3533 3533        // G1CollectedHeap::ref_processing_init() to see how
3534 3534        // reference processing currently works in G1.
3535      -
     3535 +      
3536 3536        // Enable discovery in the STW reference processor
3537 3537        ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3538 3538                                              true /*verify_no_refs*/);
3539 3539  
3540 3540        {
3541 3541          // We want to temporarily turn off discovery by the
3542 3542          // CM ref processor, if necessary, and turn it back on
3543 3543          // on again later if we do. Using a scoped
3544 3544          // NoRefDiscovery object will do this.
3545 3545          NoRefDiscovery no_cm_discovery(ref_processor_cm());
... 170 lines elided ...
3716 3716                // committed/uncommitted amount match the backing store
3717 3717                assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3718 3718                assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3719 3719              }
3720 3720            }
3721 3721          }
3722 3722  
3723 3723          double end_time_sec = os::elapsedTime();
3724 3724          double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
3725 3725          g1_policy()->record_pause_time_ms(pause_time_ms);
3726      -        int active_gc_threads = workers()->active_workers();
3727      -        g1_policy()->record_collection_pause_end(active_gc_threads);
     3726 +        int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
     3727 +                                workers()->active_workers() : 1);
     3728 +        g1_policy()->record_collection_pause_end(active_workers);
3728 3729  
3729 3730          MemoryService::track_memory_usage();
3730 3731  
3731 3732          // In prepare_for_verify() below we'll need to scan the deferred
3732 3733          // update buffers to bring the RSets up-to-date if
3733 3734          // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3734 3735          // the update buffers we'll probably need to scan cards on the
3735 3736          // regions we just allocated to (i.e., the GC alloc
3736 3737          // regions). However, during the last GC we called
3737 3738          // set_saved_mark() on all the GC alloc regions, so card
... 1503 lines elided ...
5241 5242    // will be processed at the end of remarking.
5242 5243    //
5243 5244    // We also need to do this copying before we process the reference
5244 5245    // objects discovered by the STW ref processor in case one of these
5245 5246    // referents points to another object which is also referenced by an
5246 5247    // object discovered by the STW ref processor.
5247 5248  
5248 5249    int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5249 5250                          workers()->active_workers() : 1);
5250 5251  
5251      -  assert(active_workers == workers()->active_workers(),
5252      -         "Need to reset active_workers");
     5252 +  assert(!G1CollectedHeap::use_parallel_gc_threads() ||
     5253 +           active_workers == workers()->active_workers(),
     5254 +           "Need to reset active_workers");
     5255 +
5253 5256    set_par_threads(active_workers);
5254 5257    G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
5255 5258  
5256 5259    if (G1CollectedHeap::use_parallel_gc_threads()) {
5257 5260      workers()->run_task(&keep_cm_referents);
5258 5261    } else {
5259 5262      keep_cm_referents.work(0);
5260 5263    }
5261 5264  
5262 5265    set_par_threads(0);
... 117 lines elided ...
5380 5383  
5381 5384    int n_workers;
5382 5385    if (G1CollectedHeap::use_parallel_gc_threads()) {
5383 5386      n_workers =
5384 5387        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5385 5388                                       workers()->active_workers(),
5386 5389                                       Threads::number_of_non_daemon_threads());
5387 5390      assert(UseDynamicNumberOfGCThreads ||
5388 5391             n_workers == workers()->total_workers(),
5389 5392             "If not dynamic should be using all the  workers");
     5393 +    workers()->set_active_workers(n_workers);
5390 5394      set_par_threads(n_workers);
5391 5395    } else {
5392 5396      assert(n_par_threads() == 0,
5393 5397             "Should be the original non-parallel value");
5394 5398      n_workers = 1;
5395 5399    }
5396      -  workers()->set_active_workers(n_workers);
5397 5400  
5398 5401    G1ParTask g1_par_task(this, _task_queues);
5399 5402  
5400 5403    init_for_evac_failure(NULL);
5401 5404  
5402 5405    rem_set()->prepare_for_younger_refs_iterate(true);
5403 5406  
5404 5407    assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5405 5408    double start_par = os::elapsedTime();
5406 5409  
... 1 line elided ...
5408 5411      // The individual threads will set their evac-failure closures.
5409 5412      StrongRootsScope srs(this);
5410 5413      if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5411 5414      // These tasks use ShareHeap::_process_strong_tasks
5412 5415      assert(UseDynamicNumberOfGCThreads ||
5413 5416             workers()->active_workers() == workers()->total_workers(),
5414 5417             "If not dynamic should be using all the  workers");
5415 5418      workers()->run_task(&g1_par_task);
5416 5419    } else {
5417 5420      StrongRootsScope srs(this);
     5421 +    g1_par_task.set_for_termination(n_workers);
5418 5422      g1_par_task.work(0);
5419 5423    }
5420 5424  
5421 5425    double par_time = (os::elapsedTime() - start_par) * 1000.0;
5422 5426    g1_policy()->record_par_time(par_time);
5423 5427  
5424 5428    set_par_threads(0);
5425 5429  
5426 5430    // Process any discovered reference objects - we have
5427 5431    // to do this _before_ we retire the GC alloc regions
... 637 lines elided ...
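
The serial branch of the evacuation hunk above deserves a note: g1_par_task.work(0) bypasses the work gang, so the termination protocol that the gang presumably sizes when a task is submitted never gets set up, which is why the patch calls set_for_termination(n_workers) explicitly before work(0). A minimal sketch of that idea, with GangTask and run_evacuation as hypothetical stand-ins rather than the HotSpot classes:

    #include <cstdio>

    // Hypothetical stand-in for a gang task; in HotSpot,
    // set_for_termination() sizes the parallel task terminator.
    struct GangTask {
      int _terminator_workers = 0;
      void set_for_termination(int n_workers) { _terminator_workers = n_workers; }
      void work(int worker_id) {
        printf("running as worker %d, terminator sized for %d\n",
               worker_id, _terminator_workers);
      }
    };

    // false models ParallelGCThreads == 0.
    static bool use_parallel_gc_threads() { return false; }

    void run_evacuation(GangTask& task, int n_workers) {
      if (use_parallel_gc_threads()) {
        // Parallel path: the work gang runs the task on its workers.
      } else {
        // Serial path: work(0) is called directly, so the terminator has
        // to be sized for the single worker first, mirroring the added
        // g1_par_task.set_for_termination(n_workers) in the hunk above.
        task.set_for_termination(n_workers);
        task.work(0);
      }
    }

    int main() {
      GangTask task;
      run_evacuation(task, 1);
      return 0;
    }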
6065 6069  }
6066 6070  
6067 6071  HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6068 6072                                                      bool force) {
6069 6073    return _g1h->new_mutator_alloc_region(word_size, force);
6070 6074  }
6071 6075  
6072 6076  void G1CollectedHeap::set_par_threads() {
6073 6077    // Don't change the number of workers.  Use the value previously set
6074 6078    // in the workgroup.
     6079 +  assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6075 6080    int n_workers = workers()->active_workers();
6076      -    assert(UseDynamicNumberOfGCThreads ||
     6081 +  assert(UseDynamicNumberOfGCThreads ||
6077 6082             n_workers == workers()->total_workers(),
6078 6083        "Otherwise should be using the total number of workers");
6079 6084    if (n_workers == 0) {
6080 6085      assert(false, "Should have been set in prior evacuation pause.");
6081 6086      n_workers = ParallelGCThreads;
6082 6087      workers()->set_active_workers(n_workers);
6083 6088    }
6084 6089    set_par_threads(n_workers);
6085 6090  }
6086 6091  
... 164 lines elided ...