1010 addr_2, b_start_2, p);
1011 *failures = true;
1012 return;
1013 }
1014 }
1015
1016 // Look up an address between top and end
1017 size_t diff = pointer_delta(the_end, p) / 2;
1018 HeapWord* addr_3 = p + diff;
1019 if (addr_3 < the_end) {
1020 HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
1021 if (b_start_3 != p) {
1022 gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
1023 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
1024 addr_3, b_start_3, p);
1025 *failures = true;
1026 return;
1027 }
1028 }
1029
1030   // Look up end - 1
1031 HeapWord* addr_4 = the_end - 1;
1032 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
1033 if (b_start_4 != p) {
1034 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
1035 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
1036 addr_4, b_start_4, p);
1037 *failures = true;
1038 return;
1039 }
1040 }
1041
1042 if (is_humongous && object_num > 1) {
1043 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
1044 "but has "SIZE_FORMAT", objects",
1045 bottom(), end(), object_num);
1046 *failures = true;
1047 return;
1048 }
1049
1050 verify_strong_code_roots(vo, failures);
1094 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
1095 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1096 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
1097 if (_gc_time_stamp < g1h->get_gc_time_stamp())
1098 return top();
1099 else
1100 return ContiguousSpace::saved_mark_word();
1101 }
1102
1103 void G1OffsetTableContigSpace::set_saved_mark() {
1104 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1105 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
1106
1107 if (_gc_time_stamp < curr_gc_time_stamp) {
1108 // The order of these is important, as another thread might be
1109 // about to start scanning this region. If it does so after
1110 // set_saved_mark and before _gc_time_stamp = ..., then the latter
1111 // will be false, and it will pick up top() as the high water mark
1112 // of region. If it does so after _gc_time_stamp = ..., then it
1113 // will pick up the right saved_mark_word() as the high water mark
1114 // of the region. Either way, the behaviour will be correct.
1115 ContiguousSpace::set_saved_mark();
1116 OrderAccess::storestore();
1117 _gc_time_stamp = curr_gc_time_stamp;
1118 // No need to do another barrier to flush the writes above. If
1119 // this is called in parallel with other threads trying to
1120 // allocate into the region, the caller should call this while
1121 // holding a lock and when the lock is released the writes will be
1122 // flushed.
1123 }
1124 }
1125
1126 G1OffsetTableContigSpace::
1127 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1128 MemRegion mr) :
1129 _offsets(sharedOffsetArray, mr),
1130 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1131 _gc_time_stamp(0)
1132 {
1133 _offsets.set_space(this);
1134 // false ==> we'll do the clearing if there's clearing to be done.
|
1010 addr_2, b_start_2, p);
1011 *failures = true;
1012 return;
1013 }
1014 }
1015
1016 // Look up an address between top and end
1017 size_t diff = pointer_delta(the_end, p) / 2;
1018 HeapWord* addr_3 = p + diff;
1019 if (addr_3 < the_end) {
1020 HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
1021 if (b_start_3 != p) {
1022 gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
1023 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
1024 addr_3, b_start_3, p);
1025 *failures = true;
1026 return;
1027 }
1028 }
1029
1030 // Look up end - 1
1031 HeapWord* addr_4 = the_end - 1;
1032 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
1033 if (b_start_4 != p) {
1034 gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
1035 " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
1036 addr_4, b_start_4, p);
1037 *failures = true;
1038 return;
1039 }
1040 }
1041
1042 if (is_humongous && object_num > 1) {
1043 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
1044 "but has "SIZE_FORMAT", objects",
1045 bottom(), end(), object_num);
1046 *failures = true;
1047 return;
1048 }
1049
1050 verify_strong_code_roots(vo, failures);
1094 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
1095 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1096 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
1097 if (_gc_time_stamp < g1h->get_gc_time_stamp())
1098 return top();
1099 else
1100 return ContiguousSpace::saved_mark_word();
1101 }
1102
1103 void G1OffsetTableContigSpace::set_saved_mark() {
1104 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1105 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
1106
1107 if (_gc_time_stamp < curr_gc_time_stamp) {
1108 // The order of these is important, as another thread might be
1109 // about to start scanning this region. If it does so after
1110 // set_saved_mark and before _gc_time_stamp = ..., then the latter
1111 // will be false, and it will pick up top() as the high water mark
1112 // of region. If it does so after _gc_time_stamp = ..., then it
1113 // will pick up the right saved_mark_word() as the high water mark
1114 // of the region. Either way, the behavior will be correct.
1115 ContiguousSpace::set_saved_mark();
1116 OrderAccess::storestore();
1117 _gc_time_stamp = curr_gc_time_stamp;
1118 // No need to do another barrier to flush the writes above. If
1119 // this is called in parallel with other threads trying to
1120 // allocate into the region, the caller should call this while
1121 // holding a lock and when the lock is released the writes will be
1122 // flushed.
1123 }
1124 }
1125
1126 G1OffsetTableContigSpace::
1127 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
1128 MemRegion mr) :
1129 _offsets(sharedOffsetArray, mr),
1130 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
1131 _gc_time_stamp(0)
1132 {
1133 _offsets.set_space(this);
1134 // false ==> we'll do the clearing if there's clearing to be done.
|