13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/nmethod.hpp"
27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
30 #include "gc_implementation/g1/heapRegion.inline.hpp"
31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
33 #include "memory/genOopClosures.inline.hpp"
34 #include "memory/iterator.hpp"
35 #include "memory/space.inline.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/orderAccess.inline.hpp"
38
39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
40
// Region sizing parameters. Zero here; set exactly once during VM startup
// (presumably by the heap-region-size setup code, which is not in this
// chunk — confirm) before any HeapRegion is created.
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
46
// Build a dirty-card-to-oop closure for region 'hr'. 'fk' selects how
// references found on the card are filtered before being passed to 'cl'
// (see the switch in HeapRegionDCTOC::walk_mem_region).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
53
// Cache region 'r's bounds and the downstream closure. The cached
// [_r_bottom, _r_end) bounds are presumably used by the closure's oop
// methods (declared elsewhere) to filter out-of-region references — the
// filtering code itself is not in this chunk.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
57
58 template<class ClosureType>
59 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
60 HeapRegion* hr,
61 HeapWord* cur, HeapWord* top) {
62 oop cur_oop = oop(cur);
63 int oop_size = cur_oop->size();
64 HeapWord* next_obj = cur + oop_size;
65 while (next_obj < top) {
66 // Keep filtering the remembered set.
67 if (!g1h->is_obj_dead(cur_oop, hr)) {
68 // Bottom lies entirely below top, so we can call the
69 // non-memRegion version of oop_iterate below.
70 cur_oop->oop_iterate(cl);
71 }
72 cur = next_obj;
73 cur_oop = oop(cur);
74 oop_size = cur_oop->size();
75 next_obj = cur + oop_size;
76 }
77 return cur;
78 }
79
80 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
81 HeapWord* bottom,
82 HeapWord* top) {
83 G1CollectedHeap* g1h = _g1;
84 int oop_size;
85 ExtendedOopClosure* cl2 = NULL;
86
87 FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
88 FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
89
90 switch (_fk) {
91 case NoFilterKind: cl2 = _cl; break;
92 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
93 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
94 default: ShouldNotReachHere();
95 }
96
97 // Start filtering what we add to the remembered set. If the object is
98 // not considered dead, either because it is marked (in the mark bitmap)
99 // or it was allocated after marking finished, then we add it. Otherwise
100 // we can safely ignore the object.
101 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
102 oop_size = oop(bottom)->oop_iterate(cl2, mr);
103 } else {
104 oop_size = oop(bottom)->size();
105 }
106
107 bottom += oop_size;
108
109 if (bottom < top) {
110 // We replicate the loop below for several kinds of possible filters.
111 switch (_fk) {
112 case NoFilterKind:
113 bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
114 break;
115
116 case IntoCSFilterKind: {
117 FilterIntoCSClosure filt(this, g1h, _cl);
118 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
119 break;
120 }
121
122 case OutOfRegionFilterKind: {
123 FilterOutOfRegionClosure filt(_hr, _cl);
124 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
443 // We used to use "block_start_careful" here. But we're actually happy
444 // to update the BOT while we do this...
445 HeapWord* cur = block_start(mr.start());
446 mr = mr.intersection(used_region());
447 if (mr.is_empty()) return NULL;
448 // Otherwise, find the obj that extends onto mr.start().
449
450 assert(cur <= mr.start()
451 && (oop(cur)->klass_or_null() == NULL ||
452 cur + oop(cur)->size() > mr.start()),
453 "postcondition of block_start");
454 oop obj;
455 while (cur < mr.end()) {
456 obj = oop(cur);
457 if (obj->klass_or_null() == NULL) {
458 // Ran into an unparseable point.
459 return cur;
460 } else if (!g1h->is_obj_dead(obj)) {
461 cl->do_object(obj);
462 }
463 cur += obj->size();
464 }
465 return NULL;
466 }
467
468 HeapWord*
469 HeapRegion::
470 oops_on_card_seq_iterate_careful(MemRegion mr,
471 FilterOutOfRegionClosure* cl,
472 bool filter_young,
473 jbyte* card_ptr) {
474 // Currently, we should only have to clean the card if filter_young
475 // is true and vice versa.
476 if (filter_young) {
477 assert(card_ptr != NULL, "pre-condition");
478 } else {
479 assert(card_ptr == NULL, "pre-condition");
480 }
481 G1CollectedHeap* g1h = G1CollectedHeap::heap();
482
483 // If we're within a stop-world GC, then we might look at a card in a
515 // Cache the boundaries of the memory region in some const locals
516 HeapWord* const start = mr.start();
517 HeapWord* const end = mr.end();
518
519 // We used to use "block_start_careful" here. But we're actually happy
520 // to update the BOT while we do this...
521 HeapWord* cur = block_start(start);
522 assert(cur <= start, "Postcondition");
523
524 oop obj;
525
526 HeapWord* next = cur;
527 while (next <= start) {
528 cur = next;
529 obj = oop(cur);
530 if (obj->klass_or_null() == NULL) {
531 // Ran into an unparseable point.
532 return cur;
533 }
534 // Otherwise...
535 next = (cur + obj->size());
536 }
537
538 // If we finish the above loop...We have a parseable object that
539 // begins on or before the start of the memory region, and ends
540 // inside or spans the entire region.
541
542 assert(obj == oop(cur), "sanity");
543 assert(cur <= start &&
544 obj->klass_or_null() != NULL &&
545 (cur + obj->size()) > start,
546 "Loop postcondition");
547
548 if (!g1h->is_obj_dead(obj)) {
549 obj->oop_iterate(cl, mr);
550 }
551
552 while (cur < end) {
553 obj = oop(cur);
554 if (obj->klass_or_null() == NULL) {
555 // Ran into an unparseable point.
556 return cur;
557 };
558
559 // Otherwise:
560 next = (cur + obj->size());
561
562 if (!g1h->is_obj_dead(obj)) {
563 if (next < end || !obj->is_objArray()) {
564 // This object either does not span the MemRegion
565 // boundary, or if it does it's not an array.
566 // Apply closure to whole object.
567 obj->oop_iterate(cl);
568 } else {
569 // This obj is an array that spans the boundary.
570 // Stop at the boundary.
571 obj->oop_iterate(cl, mr);
572 }
573 }
574 cur = next;
575 }
576 return NULL;
577 }
578
579 // Code roots support
580
895 }
896 }
897 }
898 };
899
900 // This really ought to be commoned up into OffsetTableContigSpace somehow.
901 // We would need a mechanism to make that code skip dead objects.
902
903 void HeapRegion::verify(VerifyOption vo,
904 bool* failures) const {
905 G1CollectedHeap* g1 = G1CollectedHeap::heap();
906 *failures = false;
907 HeapWord* p = bottom();
908 HeapWord* prev_p = NULL;
909 VerifyLiveClosure vl_cl(g1, vo);
910 bool is_humongous = isHumongous();
911 bool do_bot_verify = !is_young();
912 size_t object_num = 0;
913 while (p < top()) {
914 oop obj = oop(p);
915 size_t obj_size = obj->size();
916 object_num += 1;
917
918 if (is_humongous != g1->isHumongous(obj_size)) {
919 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
920 SIZE_FORMAT" words) in a %shumongous region",
921 p, g1->isHumongous(obj_size) ? "" : "non-",
922 obj_size, is_humongous ? "" : "non-");
923 *failures = true;
924 return;
925 }
926
// If it returns false, verify_for_object() will output the
// appropriate message.
929 if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
930 *failures = true;
931 return;
932 }
933
934 if (!g1->is_obj_dead_cond(obj, this, vo)) {
935 if (obj->is_oop()) {
1031 if (is_humongous && object_num > 1) {
1032 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
1033 "but has "SIZE_FORMAT", objects",
1034 bottom(), end(), object_num);
1035 *failures = true;
1036 return;
1037 }
1038
1039 verify_strong_code_roots(vo, failures);
1040 }
1041
// Convenience verification entry point: verify against the previous
// marking information and discard the failure flag.
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
1046
1047 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
1048 // away eventually.
1049
// Empty the space, then reset the block offset table state so that BOT
// queries on the now-empty space remain consistent.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
1055
// Keep the block offset table's notion of bottom in sync with the space.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
1060
// Resize the block offset table to cover the space's new extent.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
1065
// Print a short description followed by the space's layout:
// [bottom, top, BOT threshold, end).
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}
1072
// Delegate to the block offset table: reset its update threshold and
// return the new threshold address.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
1076
// Record the block [start, end) in the block offset table — called when
// an allocation crosses the current BOT threshold — and return the
// updated threshold.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
1082
1083 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
1084 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1085 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
1086 if (_gc_time_stamp < g1h->get_gc_time_stamp())
1087 return top();
1088 else
1089 return ContiguousSpace::saved_mark_word();
1090 }
1091
// Publish top() as this space's saved mark and stamp it with the current
// GC time stamp. The guard below makes repeated calls within the same GC
// no-ops. NOTE: the statement order (set mark, storestore, set stamp) is
// load-bearing — do not reorder.
void G1OffsetTableContigSpace::record_top_and_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behavior will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}
1114
// Construct the space over 'mr', wiring up the shared block offset array.
// The GC time stamp starts at 0, i.e. "not yet scanned in any GC".
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/nmethod.hpp"
27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
30 #include "gc_implementation/g1/heapRegion.inline.hpp"
31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
33 #include "gc_implementation/shared/liveRange.hpp"
34 #include "memory/genOopClosures.inline.hpp"
35 #include "memory/iterator.hpp"
36 #include "memory/space.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/orderAccess.inline.hpp"
39
40 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
41
// Region sizing parameters. Zero here; set exactly once during VM startup
// (presumably by the heap-region-size setup code, which is not in this
// chunk — confirm) before any HeapRegion is created.
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
47
// Build a dirty-card-to-oop closure for region 'hr'. 'fk' selects how
// references found on the card are filtered before being passed to 'cl'
// (see the switch in HeapRegionDCTOC::walk_mem_region).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
54
// Cache region 'r's bounds and the downstream closure. The cached
// [_r_bottom, _r_end) bounds are presumably used by the closure's oop
// methods (declared elsewhere) to filter out-of-region references — the
// filtering code itself is not in this chunk.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
58
59 template<class ClosureType>
60 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
61 HeapRegion* hr,
62 HeapWord* cur, HeapWord* top) {
63 oop cur_oop = oop(cur);
64 size_t oop_size = hr->block_size(cur);
65 HeapWord* next_obj = cur + oop_size;
66 while (next_obj < top) {
67 // Keep filtering the remembered set.
68 if (!g1h->is_obj_dead(cur_oop, hr)) {
69 // Bottom lies entirely below top, so we can call the
70 // non-memRegion version of oop_iterate below.
71 cur_oop->oop_iterate(cl);
72 }
73 cur = next_obj;
74 cur_oop = oop(cur);
75 oop_size = hr->block_size(cur);
76 next_obj = cur + oop_size;
77 }
78 return cur;
79 }
80
81 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
82 HeapWord* bottom,
83 HeapWord* top) {
84 G1CollectedHeap* g1h = _g1;
85 size_t oop_size;
86 ExtendedOopClosure* cl2 = NULL;
87
88 FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
89 FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
90
91 switch (_fk) {
92 case NoFilterKind: cl2 = _cl; break;
93 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
94 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
95 default: ShouldNotReachHere();
96 }
97
98 // Start filtering what we add to the remembered set. If the object is
99 // not considered dead, either because it is marked (in the mark bitmap)
100 // or it was allocated after marking finished, then we add it. Otherwise
101 // we can safely ignore the object.
102 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
103 oop_size = oop(bottom)->oop_iterate(cl2, mr);
104 } else {
105 oop_size = _hr->block_size(bottom);
106 }
107
108 bottom += oop_size;
109
110 if (bottom < top) {
111 // We replicate the loop below for several kinds of possible filters.
112 switch (_fk) {
113 case NoFilterKind:
114 bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
115 break;
116
117 case IntoCSFilterKind: {
118 FilterIntoCSClosure filt(this, g1h, _cl);
119 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
120 break;
121 }
122
123 case OutOfRegionFilterKind: {
124 FilterOutOfRegionClosure filt(_hr, _cl);
125 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
444 // We used to use "block_start_careful" here. But we're actually happy
445 // to update the BOT while we do this...
446 HeapWord* cur = block_start(mr.start());
447 mr = mr.intersection(used_region());
448 if (mr.is_empty()) return NULL;
449 // Otherwise, find the obj that extends onto mr.start().
450
451 assert(cur <= mr.start()
452 && (oop(cur)->klass_or_null() == NULL ||
453 cur + oop(cur)->size() > mr.start()),
454 "postcondition of block_start");
455 oop obj;
456 while (cur < mr.end()) {
457 obj = oop(cur);
458 if (obj->klass_or_null() == NULL) {
459 // Ran into an unparseable point.
460 return cur;
461 } else if (!g1h->is_obj_dead(obj)) {
462 cl->do_object(obj);
463 }
464 cur += block_size(cur);
465 }
466 return NULL;
467 }
468
469 HeapWord*
470 HeapRegion::
471 oops_on_card_seq_iterate_careful(MemRegion mr,
472 FilterOutOfRegionClosure* cl,
473 bool filter_young,
474 jbyte* card_ptr) {
475 // Currently, we should only have to clean the card if filter_young
476 // is true and vice versa.
477 if (filter_young) {
478 assert(card_ptr != NULL, "pre-condition");
479 } else {
480 assert(card_ptr == NULL, "pre-condition");
481 }
482 G1CollectedHeap* g1h = G1CollectedHeap::heap();
483
484 // If we're within a stop-world GC, then we might look at a card in a
516 // Cache the boundaries of the memory region in some const locals
517 HeapWord* const start = mr.start();
518 HeapWord* const end = mr.end();
519
520 // We used to use "block_start_careful" here. But we're actually happy
521 // to update the BOT while we do this...
522 HeapWord* cur = block_start(start);
523 assert(cur <= start, "Postcondition");
524
525 oop obj;
526
527 HeapWord* next = cur;
528 while (next <= start) {
529 cur = next;
530 obj = oop(cur);
531 if (obj->klass_or_null() == NULL) {
532 // Ran into an unparseable point.
533 return cur;
534 }
535 // Otherwise...
536 next = cur + block_size(cur);
537 }
538
539 // If we finish the above loop...We have a parseable object that
540 // begins on or before the start of the memory region, and ends
541 // inside or spans the entire region.
542
543 assert(obj == oop(cur), "sanity");
544 assert(cur <= start, "Loop postcondition");
545 assert(obj->klass_or_null() != NULL, "Loop postcondition");
546 assert((cur + block_size(cur)) > start, "Loop postcondition");
547
548 if (!g1h->is_obj_dead(obj)) {
549 obj->oop_iterate(cl, mr);
550 }
551
552 while (cur < end) {
553 obj = oop(cur);
554 if (obj->klass_or_null() == NULL) {
555 // Ran into an unparseable point.
556 return cur;
557 };
558
559 // Otherwise:
560 next = cur + block_size(cur);
561
562 if (!g1h->is_obj_dead(obj)) {
563 if (next < end || !obj->is_objArray()) {
564 // This object either does not span the MemRegion
565 // boundary, or if it does it's not an array.
566 // Apply closure to whole object.
567 obj->oop_iterate(cl);
568 } else {
569 // This obj is an array that spans the boundary.
570 // Stop at the boundary.
571 obj->oop_iterate(cl, mr);
572 }
573 }
574 cur = next;
575 }
576 return NULL;
577 }
578
579 // Code roots support
580
895 }
896 }
897 }
898 };
899
900 // This really ought to be commoned up into OffsetTableContigSpace somehow.
901 // We would need a mechanism to make that code skip dead objects.
902
903 void HeapRegion::verify(VerifyOption vo,
904 bool* failures) const {
905 G1CollectedHeap* g1 = G1CollectedHeap::heap();
906 *failures = false;
907 HeapWord* p = bottom();
908 HeapWord* prev_p = NULL;
909 VerifyLiveClosure vl_cl(g1, vo);
910 bool is_humongous = isHumongous();
911 bool do_bot_verify = !is_young();
912 size_t object_num = 0;
913 while (p < top()) {
914 oop obj = oop(p);
915 size_t obj_size = block_size(p);
916 object_num += 1;
917
918 if (is_humongous != g1->isHumongous(obj_size)) {
919 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
920 SIZE_FORMAT" words) in a %shumongous region",
921 p, g1->isHumongous(obj_size) ? "" : "non-",
922 obj_size, is_humongous ? "" : "non-");
923 *failures = true;
924 return;
925 }
926
// If it returns false, verify_for_object() will output the
// appropriate message.
929 if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
930 *failures = true;
931 return;
932 }
933
934 if (!g1->is_obj_dead_cond(obj, this, vo)) {
935 if (obj->is_oop()) {
1031 if (is_humongous && object_num > 1) {
1032 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
1033 "but has "SIZE_FORMAT", objects",
1034 bottom(), end(), object_num);
1035 *failures = true;
1036 return;
1037 }
1038
1039 verify_strong_code_roots(vo, failures);
1040 }
1041
// Convenience verification entry point: verify against the previous
// marking information and discard the failure flag.
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
1046
1047 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
1048 // away eventually.
1049
// Empty the space: reset the allocation pointer to bottom, clear the
// underlying space, then reset the block offset table state so that BOT
// queries on the now-empty space remain consistent.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
1056
// Keep the block offset table's notion of bottom in sync with the space.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
1061
// Resize the block offset table to cover the space's new extent.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
1066
// Print a short description followed by the space's layout:
// [bottom, top, BOT threshold, end).
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}
1073
// Delegate to the block offset table: reset its update threshold and
// return the new threshold address.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
1077
// Record the block [start, end) in the block offset table — called when
// an allocation crosses the current BOT threshold — and return the
// updated threshold.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
1083
1084 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
1085 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1086 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
1087 if (_gc_time_stamp < g1h->get_gc_time_stamp())
1088 return top();
1089 else
1090 return Space::saved_mark_word();
1091 }
1092
// Publish top() as this space's saved mark and stamp it with the current
// GC time stamp. The guard below makes repeated calls within the same GC
// no-ops. NOTE: the statement order (set mark, storestore, set stamp) is
// load-bearing — do not reorder.
void G1OffsetTableContigSpace::record_top_and_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behavior will be correct.
    Space::set_saved_mark_word(top());
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}
1115
// "Safe" object iteration. For this space the plain object_iterate()
// (which skips a possible leading non-object block) is presumably already
// safe to use directly, so just delegate — confirm against the Space
// interface contract.
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}
1119
1120 void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
1121 HeapWord* p = bottom();
1122 if (!block_is_obj(p)) {
1123 p += block_size(p);
1124 }
1125 while (p < top()) {
1126 blk->do_object(oop(p));
1127 p += block_size(p);
1128 }
1129 }
1130
// Forward all objects for a full compaction. Every block in this space is
// treated as an object (block_is_always_obj), and the shared
// SCAN_AND_FORWARD macro performs the sweep, sizing blocks via block_size.
#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj
1136
// Construct the space over 'mr', wiring up the shared block offset array.
// The GC time stamp starts at 0, i.e. "not yet scanned in any GC".
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  // NOTE(review): _top is initialized from bottom() before
  // CompactibleSpace::initialize() runs in the body below; presumably
  // initialize() (re)establishes bottom/top consistently — confirm.
  _top(bottom()),
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
|