  _covered = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
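  // Editor's note (illustrative, not part of the original code): when large
  // pages are in use, the reservation must be aligned to the larger of the
  // page size and the allocation granularity. For example, assuming 2M large
  // pages and a 64K allocation granularity (typical Windows values):
  //   rs_align = MAX2(2M, 64K) = 2M
  // With _page_size == os::vm_page_size(), passing 0 requests the platform's
  // default alignment instead.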
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
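  // Editor's sketch (assumption, mirroring what byte_for() computes): the
  // biased byte_map_base lets a card be located from a heap address with a
  // single shift and add, no bounds arithmetic:
  //
  //   jbyte* card_for(HeapWord* addr) {  // hypothetical helper
  //     return byte_map_base + (uintptr_t(addr) >> card_shift);
  //   }
  //
  // Since byte_map_base == _byte_map - (uintptr_t(low_bound) >> card_shift),
  // any addr in [low_bound, high_bound) maps into the byte map itself.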

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;
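  // Editor's note (assumption): the guard card sits just past the last valid
  // card, so a store check against an address at or beyond high_bound writes
  // to the committed guard page above rather than faulting; verify_guard()
  // can then check that *guard_card still holds last_card.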

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =

// ... (intervening code elided) ...

    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
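
// Editor's note (illustrative): MemRegion::minus trims the guard page off the
// result, e.g. committed [0x1000, 0x9000) minus a guard region [0x8000, 0x9000)
// yields [0x1000, 0x8000), so the guard page is never reported as committed.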

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
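    // Editor's note (illustrative): if, say, a lower region's committed space
    // ends at 0x6000 while this one currently ends at 0x5000, cur_committed
    // is stretched to end at 0x6000; the committed ranges may overlap at the
    // ends but never leave an interior hole.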
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert

// ... (intervening code elided) ...

  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call. With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call. The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers(). n_par_threads can be set to 0 to
    // turn off parallelism. For example, when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, n_par_threads() may have been set to 0. Unlike n_par_threads,
    // active_workers is not overloaded as a switch to disable parallelism;
    // it always means the number of active GC workers. If parallelism has
    // not been shut off by setting n_par_threads to 0, then n_par_threads
    // should equal active_workers. Once a different mechanism for shutting
    // off parallelism is in place, active_workers can be used instead of
    // n_par_threads. This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism:
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
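    // Editor's sketch (assumption): a serial caller such as verification
    // would reach here with parallelism switched off, roughly:
    //
    //   SharedHeap* sh = SharedHeap::heap();
    //   sh->set_par_threads(0);  // n_par_threads() == 0, so is_par is false
    //   // ... then iterate younger refs; the serial branch below is taken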
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");

// ... (remainder elided) ...