
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp


@@ -2903,12 +2903,12 @@
 
 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
 class CMSParMarkTask : public AbstractGangTask {
  protected:
   CMSCollector*     _collector;
-  int               _n_workers;
-  CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
+  uint              _n_workers;
+  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
       AbstractGangTask(name),
       _collector(collector),
       _n_workers(n_workers) {}
   // Work method in support of parallel rescan ... of young gen spaces
   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,

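The switch from int to uint aligns _n_workers with the uint worker_id that work(uint worker_id) and the rest of the work-gang API already use. A minimal standalone sketch (hypothetical names, not from this file) of the mixed-sign comparison the unsigned type avoids:

    #include <cstdio>

    // With an unsigned bound, comparing against the unsigned worker id
    // is exact and raises no -Wsign-compare warning.
    static void dispatch(unsigned int n_workers) {
      for (unsigned int worker_id = 0; worker_id < n_workers; worker_id++) {
        std::printf("worker %u\n", worker_id);
      }
    }

    int main() { dispatch(4); return 0; }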
@@ -2918,11 +2918,11 @@
 };
 
 // Parallel initial mark task
 class CMSParInitialMarkTask: public CMSParMarkTask {
  public:
-  CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
+  CMSParInitialMarkTask(CMSCollector* collector, uint n_workers) :
       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
                      collector, n_workers) {}
   void work(uint worker_id);
 };
 

@@ -3007,11 +3007,11 @@
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
       // The parallel version.
       FlexibleWorkGang* workers = gch->workers();
       assert(workers != NULL, "Need parallel worker threads.");
-      int n_workers = workers->active_workers();
+      uint n_workers = workers->active_workers();
       CMSParInitialMarkTask tsk(this, n_workers);
       gch->set_par_threads(n_workers);
       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
       if (n_workers > 1) {
         StrongRootsScope srs;

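The shape of this block -- read active_workers once, size the task and the young-gen subtasks from it, then branch on n_workers > 1 -- is the usual parallel-or-serial dispatch. A simplified outline with stand-in types (Task here is not the HotSpot class):

    // Stand-in for an AbstractGangTask-style interface.
    struct Task {
      virtual void work(unsigned int worker_id) = 0;
      virtual ~Task() {}
    };

    // Branch on the worker count: a gang-style parallel pass when
    // more than one worker is active, a serial fallback otherwise.
    void run_task(Task& task, unsigned int n_workers) {
      if (n_workers > 1) {
        for (unsigned int i = 0; i < n_workers; i++) {
          task.work(i);  // HotSpot hands these to gang threads
        }
      } else {
        task.work(0);    // serial path on the calling thread
      }
    }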
@@ -3148,11 +3148,11 @@
 };
 
 // MT Concurrent Marking Task
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
-  int           _n_workers;                  // requested/desired # workers
+  uint          _n_workers;                  // requested/desired # workers
   bool          _result;
   CompactibleFreeListSpace*  _cms_space;
   char          _pad_front[64];   // padding to ...
   HeapWord*     _global_finger;   // ... avoid sharing cache line
   char          _pad_back[64];

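As the comments on _pad_front and _pad_back say, the padding brackets _global_finger so that fields updated by other threads cannot land on the same cache line (false sharing). The idiom in isolation, with 64 as the assumed cache-line size taken from the original:

    // Concurrently-updated neighbors stay off _global_finger's line.
    struct PaddedFinger {
      char  _pad_front[64];
      void* _global_finger;   // hot field, written during marking
      char  _pad_back[64];
    };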
@@ -3194,11 +3194,11 @@
 
   HeapWord** global_finger_addr() { return &_global_finger; }
 
   CMSConcMarkingTerminator* terminator() { return &_term; }
 
-  virtual void set_for_termination(int active_workers) {
+  virtual void set_for_termination(uint active_workers) {
     terminator()->reset_for_reuse(active_workers);
   }
 
   void work(uint worker_id);
   bool should_yield() {

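Since set_for_termination is virtual, this signature change only works if the declaration in the gang-task base class is changed in step; with a uint parameter here and an int parameter in the base, this method would become an unrelated overload and the base version would still be called through a base pointer. A C++03-style illustration with hypothetical class names:

    struct BaseTask {
      virtual void set_for_termination(unsigned int active_workers) {}
      virtual ~BaseTask() {}
    };

    struct MarkingTask : public BaseTask {
      // Same signature as the base, so this overrides. If the base
      // still took int, calls through a BaseTask* would miss it.
      virtual void set_for_termination(unsigned int active_workers) {}
    };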
@@ -3640,11 +3640,11 @@
   _collector->startTimer();
 }
 
 bool CMSCollector::do_marking_mt() {
   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
-  int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
+  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                        conc_workers()->total_workers(),
                                        conc_workers()->active_workers(),
                                        Threads::number_of_non_daemon_threads());
   conc_workers()->set_active_workers(num_workers);
 

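For this hunk to compile cleanly, AdaptiveSizePolicy::calc_active_conc_workers presumably returns an unsigned type as well. The kind of clamping such a policy performs, in a deliberately simplified form (the heuristic below is made up; the real function also takes the current active count into account):

    // Pick a worker count from the application's thread load,
    // bounded above by the gang's total size.
    static unsigned int calc_active_workers(unsigned int total_workers,
                                            unsigned int non_daemon_threads) {
      unsigned int desired = non_daemon_threads / 2 + 1;  // illustrative only
      if (desired > total_workers) {
        desired = total_workers;
      }
      return desired;
    }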
@@ -4489,11 +4489,11 @@
  public:
   // A value of 0 passed to n_workers will cause the number of
   // workers to be taken from the active workers in the work gang.
   CMSParRemarkTask(CMSCollector* collector,
                    CompactibleFreeListSpace* cms_space,
-                   int n_workers, FlexibleWorkGang* workers,
+                   uint n_workers, FlexibleWorkGang* workers,
                    OopTaskQueueSet* task_queues):
     CMSParMarkTask("Rescan roots and grey objects in parallel",
                    collector, n_workers),
     _cms_space(cms_space),
     _task_queues(task_queues),

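The constructor comment above documents a sentinel: passing 0 for n_workers means "use the gang's active worker count". That convention still works under uint, since 0 remains representable. A sketch of the resolution step with a stand-in gang type:

    struct WorkGang {                  // stand-in for FlexibleWorkGang
      unsigned int _active;
      unsigned int active_workers() const { return _active; }
    };

    // 0 is the documented sentinel for "ask the gang".
    unsigned int resolve_workers(unsigned int n_workers, WorkGang* workers) {
      return (n_workers == 0) ? workers->active_workers() : n_workers;
    }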
@@ -4502,11 +4502,11 @@
   OopTaskQueueSet* task_queues() { return _task_queues; }
 
   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
 
   ParallelTaskTerminator* terminator() { return &_term; }
-  int n_workers() { return _n_workers; }
+  uint n_workers() { return _n_workers; }
 
   void work(uint worker_id);
 
  private:
   // ... of  dirty cards in old space

@@ -5065,11 +5065,11 @@
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   // Choose to use the number of GC workers most recently set
   // into "active_workers".  If active_workers is not set, set it
   // to ParallelGCThreads.
-  int n_workers = workers->active_workers();
+  uint n_workers = workers->active_workers();
   if (n_workers == 0) {
     assert(n_workers > 0, "Should have been set during scavenge");
     n_workers = ParallelGCThreads;
     workers->set_active_workers(n_workers);
   }

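With an unsigned count, 0 is the only out-of-range value left to guard. Note that the assert sits inside the n_workers == 0 block, so a debug build stops here while a product build silently falls back to ParallelGCThreads. The pattern compressed into a standalone sketch:

    #include <cassert>

    static unsigned int sane_worker_count(unsigned int n_workers,
                                          unsigned int parallel_gc_threads) {
      if (n_workers == 0) {
        assert(false && "Should have been set during scavenge");
        n_workers = parallel_gc_threads;  // product-build fallback
      }
      return n_workers;
    }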
@@ -5431,11 +5431,11 @@
       // may have been a different number of threads doing the discovery
       // and a different number of discovered lists may have Ref objects.
       // That is OK as long as the Reference lists are balanced (see
       // balance_all_queues() and balance_queues()).
       GenCollectedHeap* gch = GenCollectedHeap::heap();
-      int active_workers = ParallelGCThreads;
+      uint active_workers = ParallelGCThreads;
       FlexibleWorkGang* workers = gch->workers();
       if (workers != NULL) {
         active_workers = workers->active_workers();
         // The expectation is that active_workers will have already
         // been set to a reasonable value.  If it has not been set,
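The flow here defaults active_workers to ParallelGCThreads and then prefers the gang's count when a gang exists. In outline, with a stand-in gang type (the real code also validates the value, as the trailing comment indicates):

    #include <cstddef>

    struct Gang {                      // stand-in for FlexibleWorkGang
      unsigned int active_workers() const { return 8; }
    };

    unsigned int ref_proc_workers(const Gang* workers,
                                  unsigned int parallel_gc_threads) {
      unsigned int active = parallel_gc_threads;  // default from the flag
      if (workers != NULL) {
        active = workers->active_workers();       // prefer the gang's count
      }
      return active;
    }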