// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

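// Stripe layout (a conceptual sketch of the scheme below), for stripe_total
// (== N) workers and ssize cards per stripe:
//
//   |<--------------- slice 0 --------------->|<--------------- slice 1 ...
//   | stripe 0 | stripe 1 | ... | stripe N-1  | stripe 0 | stripe 1 | ...
//
// Worker k (stripe_number == k) scans stripe k of every slice, so together
// the N workers cover every card exactly once.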
void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The slice width (ssize * stripe_total cards) must be consistent
  // with the number of stripes so that the complete space is covered.
  size_t slice_width = ssize * stripe_total;
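  // Example (assuming HotSpot's usual 512-byte cards): ssize == 128 cards
  // covers 128 * 512 = 64K of heap per stripe, matching the comment above.
  // With a hypothetical stripe_total of 8 workers, slice_width is 1024 cards,
  // i.e. each slice spans 512K and worker k owns the card range
  // [slice + k*128, slice + (k+1)*128) within every slice.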
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*)sp_top, addr_for(worker_end_card));
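    // slice_end is clamped to the space top so the final, possibly partial,
    // slice is handled the same way as a full one.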

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*)first_object;)
    if ((HeapWord*)first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }
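    // At this point [slice_start, slice_end) ends on an object boundary: the
    // last object either fits entirely inside the slice or slice_end was
    // extended above to pull the whole object in.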

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card) - 1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump current_card to ending_card_of_last_object.
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;
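      // At this point [first_unclean_card, following_clean_card) is a maximal
      // run of unclean cards, possibly widened across clean cards that fall
      // in the interior of a single large object.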

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*)start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set.
          p = last_scanned;
        }
        // ...
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
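  // The precise-marks pass above is expected to have left verify_card on each
  // card that covers a real young-gen reference. Restore those marks to
  // youngergen_card so later scavenges still see them; any other non-clean
  // mark is an imprecise leftover and fails the assert below.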
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// ...

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  int ind = changed_region;
  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("  _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("  _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("  byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("  addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*)_committed[ind].start())), p2i(addr_for((jbyte*)_committed[ind].last())));

  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to cover the start
  // of any previous _committed region. This forms overlapping
  // regions, but never interior regions.
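  // Hypothetical layout: if a previous region committed [0x1000, 0x3000) and
  // this one committed [0x2000, 0x4000), cur_committed is widened below to
  // [0x1000, 0x4000) so the commit/uncommit math sees the true extent.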
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end(), so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(cur_committed.start() ==
         (HeapWord*)align_size_up((uintptr_t)cur_committed.start(),
                                  os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                                |+ cur committed +|
    //                  |+ new committed +++++++|
    // ...
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
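  // Rounding down to a page boundary may re-cover bytes that a neighboring
  // region already committed; that is intended to be safe because, as noted
  // in resize_commit_uncommit above, committed regions may overlap.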
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap).
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
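  // "entry" now addresses the first card for the newly exposed words
  // (clamped to _whole_heap); every card up to the old covered start
  // must be reset to clean below.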
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions. At most one region should be out of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
    }
  }
}