  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};
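// The top of this closure (its fields and do_oop_work) falls outside this
// excerpt. A minimal sketch of what do_oop_work plausibly does, inferred
// from has_unmarked_oop() above and from how do_object() applies the
// closure below; the body here is an assumption, not code from this file:
//
//   template <class T> void do_oop_work(T* p) {
//     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
//     if (_young_gen->is_in_reserved(obj) &&
//         !_card_table->addr_is_marked_imprecise(p)) {
//       // Record only the first unmarked location.
//       if (_unmarked_addr == NULL) {
//         _unmarked_addr = (HeapWord*)p;
//       }
//     }
//   }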

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _young_gen = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make
    // CardTableExtension a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
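// The original closure body is elided from this excerpt. The following is
// a minimal reconstruction, sketched from how verify_all_young_refs_precise()
// constructs and applies it below; the exact do_oop_work logic is an
// assumption, not verbatim code from this file.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in(obj)) {
      // Every old-to-young pointer must sit on a precisely marked card.
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      // Stamp the card as verify_card so the helper pass below can tell
      // legitimately marked cards apart from stray marks (assumed helper:
      // set_card_newgen stores verify_card for the card covering p).
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};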
308 // "current_card" is still the "following_clean_card" or
309 // the current_card is >= the worker_end_card so the
310 // loop will not execute again.
311 assert((current_card == following_clean_card) ||
312 (current_card >= worker_end_card),
313 "current_card should only be incremented if it still equals "
314 "following_clean_card");
315 // Increment current_card so that it is not processed again.
316 // It may now be dirty because a old-to-young pointer was
317 // found on it an updated. If it is now dirty, it cannot be
318 // be safely cleaned in the next iteration.
319 current_card++;
320 }
321 }
322 }

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}
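// A sketch of how these verification passes are typically wired into the
// collector, assuming a develop-mode flag along the lines of
// VerifyRememberedSets (the flag name and call sites are illustrative,
// not taken from this file):
//
//   if (VerifyRememberedSets) {
//     CardTableExtension::verify_all_young_refs_imprecise();
//   }
//   // ... run the scavenge ...
//   if (VerifyRememberedSets) {
//     CardTableExtension::verify_all_young_refs_precise();
//   }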

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}
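// Taken together: CheckForPreciseMarks stamps verify_card on every card
// that legitimately covers an old-to-young pointer, and the helper below
// then requires each card in the old gen to be either clean or
// verify_card, flipping verify_card entries back to youngergen_card.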

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}
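// For orientation, byte_for() maps a heap address to its card byte. A
// minimal sketch of the arithmetic, assuming the conventional 512-byte
// cards (card_shift == 9) and a biased base pointer (byte_map_base) as in
// the CardTableModRefBS superclass; the names are assumptions here:
//
//   jbyte* byte_for(const void* p) const {
//     return &byte_map_base[uintptr_t(p) >> card_shift];
//   }
//
// so the helper above touches exactly the card bytes covering
// [mr.start(), mr.end()].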

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// ...
bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end(), so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif
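// For reference, the alignment helpers used above and below round to a
// power-of-two boundary. A minimal sketch of their arithmetic, assuming
// "alignment" is a power of two (as os::vm_page_size() is):
//
//   uintptr_t align_size_up(uintptr_t size, uintptr_t alignment) {
//     return (size + alignment - 1) & ~(alignment - 1);
//   }
//   uintptr_t align_size_down(uintptr_t size, uintptr_t alignment) {
//     return size & ~(alignment - 1);
//   }
//
// e.g. with a 4K page, align_size_up(0x1001, 0x1000) == 0x2000 and
// align_size_down(0x1001, 0x1000) == 0x1000.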

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    // ...