 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};
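
// For context, the interface this checker implements (paraphrased from
// HeapRegionSetChecker in gc/g1/heapRegionSet.hpp; shown here only so the
// overrides above read self-contained, not as an authoritative copy):
//
//   class HeapRegionSetChecker : public CHeapObj<mtGC> {
//   public:
//     virtual void check_mt_safety() = 0;
//     virtual bool is_correct_type(HeapRegion* hr) = 0;
//     virtual const char* get_description() = 0;
//   };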

HeapRegionManager::HeapRegionManager() :
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, CollectorPolicy* policy) {
  if (heap->is_hetero_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
  }
  return new HeapRegionManager();
}
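
// Illustrative call site (hypothetical; the actual caller is G1CollectedHeap
// during heap setup, and the member names below are assumptions):
//
//   _hrm = HeapRegionManager::create_manager(this, _collector_policy);
//
// On a heterogeneous heap (e.g. one partly backed by NV-DIMM memory) this
// returns a HeterogeneousHeapRegionManager sized to
// max_heap_byte_size() / HeapRegion::GrainBytes regions; otherwise it
// returns the default HeapRegionManager.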

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
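
// ... [excerpt gap: the remainder of initialize() and the methods between it
//      and HeapRegionManager::verify() are elided; the excerpt resumes inside
//      the region-checking loop of verify()] ...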
    // this method may be called, we have only completed allocation of the
    // regions, but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
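  // memset() fills byte-wise, so this only initializes the uint array
  // correctly because Unclaimed is 0 (as declared in heapRegionManager.hpp);
  // a nonzero sentinel would need an explicit loop instead.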
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  if (_claims != NULL) {
    FREE_C_HEAP_ARRAY(uint, _claims);
  }
}

uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}