
src/share/vm/gc/g1/heapRegionManager.cpp

rev 13131 : [mq]: 8183002-remove-unused-concurrent-parameter
   1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 310   uint last_index = (uint)_regions.get_index_by_address(range.last());
 311 
 312   // Ensure that each G1 region in the range is free, returning false if not.
 313   // Commit those that are not yet available, and keep count.
 314   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
 315     if (!is_available(curr_index)) {
 316       commits++;
 317       expand_at(curr_index, 1, pretouch_workers);
 318     }
 319     HeapRegion* curr_region  = _regions.get_by_index(curr_index);
 320     if (!curr_region->is_free()) {
 321       return false;
 322     }
 323   }
 324 
 325   allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
 326   *commit_count = commits;
 327   return true;
 328 }
 329 
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();
 338   for (uint count = 0; count < n_regions; count++) {
 339     const uint index = (start_index + count) % n_regions;
 340     assert(index < n_regions, "sanity");
 341     // Skip over unavailable regions
 342     if (!is_available(index)) {
 343       continue;
 344     }
 345     HeapRegion* r = _regions.get_by_index(index);
 346     // We'll ignore regions already claimed.
 347     // However, if the iteration is specified as concurrent, the values for
 348     // is_starts_humongous and is_continues_humongous can not be trusted,
 349     // and we should just blindly iterate over regions regardless of their
 350     // humongous status.
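
The loop at lines 312-323 above implements a commit-on-demand, all-or-nothing range claim: any region in the requested index range that is not yet committed is expanded first (and counted), and the whole range is handed to the allocator only if every region in it turns out to be free. The standalone C++ sketch below illustrates that shape; it is not HotSpot code, and every name in it (Region, allocate_containing_range, the bool flags standing in for expand_at and allocate_free_regions_starting_at) is an assumption made purely for illustration.

    // Illustrative sketch only, not HotSpot code.
    #include <cstdio>
    #include <vector>

    struct Region { bool available; bool free; };   // stand-ins for is_available()/is_free()

    static bool allocate_containing_range(std::vector<Region>& regions,
                                          unsigned start_index, unsigned last_index,
                                          unsigned* commit_count) {
      unsigned commits = 0;
      for (unsigned i = start_index; i <= last_index; i++) {
        if (!regions[i].available) {
          commits++;
          regions[i].available = true;   // stand-in for expand_at(i, 1, pretouch_workers)
          regions[i].free = true;        // a freshly committed region starts out free
        }
        if (!regions[i].free) {
          return false;                  // some region in the range is already in use
        }
      }
      // Every region in [start_index, last_index] is committed and free: claim them all.
      for (unsigned i = start_index; i <= last_index; i++) {
        regions[i].free = false;         // stand-in for allocate_free_regions_starting_at()
      }
      *commit_count = commits;
      return true;
    }

    int main() {
      std::vector<Region> regions(8, Region{false, false});
      regions[2] = Region{true, true};   // one region already committed and free
      unsigned commits = 0;
      bool ok = allocate_containing_range(regions, 1, 3, &commits);
      std::printf("allocated=%d newly_committed=%u\n", (int)ok, commits);
      return 0;
    }

As in the code above, regions committed before the free-check fails stay committed, and the commit count is only reported back to the caller on the successful path.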


   1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 310   uint last_index = (uint)_regions.get_index_by_address(range.last());
 311 
 312   // Ensure that each G1 region in the range is free, returning false if not.
 313   // Commit those that are not yet available, and keep count.
 314   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
 315     if (!is_available(curr_index)) {
 316       commits++;
 317       expand_at(curr_index, 1, pretouch_workers);
 318     }
 319     HeapRegion* curr_region  = _regions.get_by_index(curr_index);
 320     if (!curr_region->is_free()) {
 321       return false;
 322     }
 323   }
 324 
 325   allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
 326   *commit_count = commits;
 327   return true;
 328 }
 329 
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();
 338   for (uint count = 0; count < n_regions; count++) {
 339     const uint index = (start_index + count) % n_regions;
 340     assert(index < n_regions, "sanity");
 341     // Skip over unavailable regions
 342     if (!is_available(index)) {
 343       continue;
 344     }
 345     HeapRegion* r = _regions.get_by_index(index);
 346     // We'll ignore regions already claimed.
 347     // However, if the iteration is specified as concurrent, the values for
 348     // is_starts_humongous and is_continues_humongous can not be trusted,
 349     // and we should just blindly iterate over regions regardless of their
 350     // humongous status.
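
The comments at lines 333-339 describe how par_iterate distributes work: every worker visits all committed regions, starting at a per-worker offset obtained from the claimer and wrapping modulo n_regions, while the claimer guarantees each region is processed by exactly one worker. Below is a standalone C++ sketch of that striped-claiming pattern; it is not HotSpot code, and the Claimer type with its atomic-flag claim is an assumption standing in for HeapRegionClaimer.

    // Illustrative sketch only, not HotSpot code.
    #include <atomic>
    #include <cstdio>
    #include <functional>
    #include <thread>
    #include <vector>

    struct Claimer {
      std::vector<std::atomic<bool>> claimed;   // stand-in for HeapRegionClaimer's claim state
      unsigned workers;
      Claimer(unsigned n_regions, unsigned n_workers) : claimed(n_regions), workers(n_workers) {}
      unsigned n_regions() const { return (unsigned)claimed.size(); }
      // Spread the workers' start positions evenly over the region index space.
      unsigned start_region_for_worker(unsigned worker_id) const {
        return n_regions() * worker_id / workers;
      }
      // Atomically claim one region; only the first caller gets 'true'.
      bool claim_region(unsigned index) {
        bool expected = false;
        return claimed[index].compare_exchange_strong(expected, true);
      }
    };

    static void par_iterate(Claimer& hrclaimer, unsigned worker_id) {
      const unsigned n_regions = hrclaimer.n_regions();
      const unsigned start_index = hrclaimer.start_region_for_worker(worker_id);
      // Every worker looks at every region, wrapping modulo n_regions; the claim
      // below ensures each region is processed by exactly one worker.
      for (unsigned count = 0; count < n_regions; count++) {
        const unsigned index = (start_index + count) % n_regions;
        if (!hrclaimer.claim_region(index)) {
          continue;  // already handled by another worker
        }
        std::printf("worker %u handles region %u\n", worker_id, index);
      }
    }

    int main() {
      const unsigned n_workers = 4;
      Claimer claimer(16 /* regions */, n_workers);
      std::vector<std::thread> pool;
      for (unsigned w = 0; w < n_workers; w++) {
        pool.emplace_back(par_iterate, std::ref(claimer), w);
      }
      for (std::thread& t : pool) {
        t.join();
      }
      return 0;
    }

Starting each worker at a different offset presumably keeps the workers from all contending on the same claim entries at the beginning of the iteration, while the modulo wrap still lets any worker pick up regions left behind by slower ones.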

