356 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
357 "just checking");
358 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
359 "just checking");
360 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
361 "just checking");
362 }
363
// Backing storage for the from-card cache: a padded 2D array indexed as
// _cache[parallel remset id][region index], allocated once in initialize().
int** FromCardCache::_cache = NULL;
// Number of region slots per row of _cache (column count).
uint FromCardCache::_max_regions = 0;
// Size in bytes of the allocation backing _cache, as reported by
// Padded2DArray::create_unfreeable; counted as static memory footprint.
size_t FromCardCache::_static_mem_size = 0;
367
368 void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
369 guarantee(_cache == NULL, "Should not call this multiple times");
370
371 _max_regions = max_num_regions;
372 _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
373 _max_regions,
374 &_static_mem_size);
375
376 for (uint i = 0; i < n_par_rs; i++) {
377 for (uint j = 0; j < _max_regions; j++) {
378 set(i, j, InvalidCard);
379 }
380 }
381 }
382
383 void FromCardCache::shrink(uint new_num_regions) {
384 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
385 assert(new_num_regions <= _max_regions, "Must be within max.");
386 for (uint j = new_num_regions; j < _max_regions; j++) {
387 set(i, j, InvalidCard);
388 }
389 }
390 }
391
392 #ifndef PRODUCT
393 void FromCardCache::print(outputStream* out) {
394 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
395 for (uint j = 0; j < _max_regions; j++) {
396 out->print_cr("_from_card_cache[%u][%u] = %d.",
397 i, j, at(i, j));
398 }
399 }
400 }
401 #endif
402
403 void FromCardCache::clear(uint region_idx) {
404 uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
405 for (uint i = 0; i < num_par_remsets; i++) {
406 set(i, region_idx, InvalidCard);
407 }
408 }
409
// Thin forward to FromCardCache: allocate one cache row per parallel
// remembered set, each sized for max_regions regions.
void OtherRegionsTable::init_from_card_cache(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}
413
// Thin forward: invalidate cached cards for regions >= new_num_regions.
void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
  FromCardCache::shrink(new_num_regions);
}
417
// Debug helper: dump the from-card cache.
// NOTE(review): print() is called with no argument here although its
// definition takes an outputStream* — presumably a default argument is
// declared in the header; confirm.
void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}
421
422 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
423 uint cur_hrs_ind = hr()->hrs_index();
424
425 if (G1TraceHeapRegionRememberedSet) {
426 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
427 from,
428 UseCompressedOops
429 ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
430 : (void *)oopDesc::load_decode_heap_oop((oop*)from));
431 }
432
433 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
434
435 if (G1TraceHeapRegionRememberedSet) {
824 return _sparse_table.contains_card(hr_ind, card_index);
825 }
826 }
827
// Forward the cleanup task to the sparse table, which accumulates its
// per-cleanup bookkeeping in hrrs_cleanup_task.
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}
832
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  // Upper bound over both phases: (mutator par ids + refinement threads)
  // during mutator time, ParallelGCThreads during a GC pause.
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}
839
840 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
841 HeapRegion* hr)
842 : _bosa(bosa),
843 _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
844 _code_roots(), _other_regions(hr, &_m) {
845 reset_for_par_iteration();
846 }
847
848 void HeapRegionRemSet::setup_remset_size() {
849 // Setup sparse and fine-grain tables sizes.
850 // table_size = base * (log(region_size / 1M) + 1)
851 const int LOG_M = 20;
852 int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
853 if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
854 G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
855 }
856 if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
857 G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
858 }
859 guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
860 }
861
862 bool HeapRegionRemSet::claim_iter() {
863 if (_iter_state != Unclaimed) return false;
864 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
|
356 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
357 "just checking");
358 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
359 "just checking");
360 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
361 "just checking");
362 }
363
// Backing storage for the from-card cache: a padded 2D array indexed as
// _cache[parallel remset id][region index], allocated once in initialize().
int** FromCardCache::_cache = NULL;
// Number of region slots per row of _cache (column count).
uint FromCardCache::_max_regions = 0;
// Size in bytes of the allocation backing _cache, as reported by
// Padded2DArray::create_unfreeable; counted as static memory footprint.
size_t FromCardCache::_static_mem_size = 0;
367
// One-time allocation of the from-card cache: n_par_rs rows of
// max_num_regions entries each. Calling this twice is a bug (guarded).
void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  // Mark every slot empty; invalidate() walks all parallel rows itself.
  invalidate(0, _max_regions);
}
378
379 void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
380 guarantee((size_t)start_idx + new_num_regions <= max_uintx,
381 err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
382 start_idx, new_num_regions));
383 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
384 uint end_idx = (start_idx + (uint)new_num_regions);
385 assert(end_idx <= _max_regions, "Must be within max.");
386 for (uint j = start_idx; j < end_idx; j++) {
387 set(i, j, InvalidCard);
388 }
389 }
390 }
391
392 #ifndef PRODUCT
393 void FromCardCache::print(outputStream* out) {
394 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
395 for (uint j = 0; j < _max_regions; j++) {
396 out->print_cr("_from_card_cache[%u][%u] = %d.",
397 i, j, at(i, j));
398 }
399 }
400 }
401 #endif
402
403 void FromCardCache::clear(uint region_idx) {
404 uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
405 for (uint i = 0; i < num_par_remsets; i++) {
406 set(i, region_idx, InvalidCard);
407 }
408 }
409
// Thin forward to FromCardCache: allocate one cache row per parallel
// remembered set, each sized for max_regions regions.
void OtherRegionsTable::initialize(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}
413
// Thin forward: invalidate cached cards for num_regions regions starting
// at start_idx.
void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
  FromCardCache::invalidate(start_idx, num_regions);
}
417
// Debug helper: dump the from-card cache.
// NOTE(review): print() is called with no argument here although its
// definition takes an outputStream* — presumably a default argument is
// declared in the header; confirm.
void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}
421
422 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
423 uint cur_hrs_ind = hr()->hrs_index();
424
425 if (G1TraceHeapRegionRememberedSet) {
426 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
427 from,
428 UseCompressedOops
429 ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
430 : (void *)oopDesc::load_decode_heap_oop((oop*)from));
431 }
432
433 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
434
435 if (G1TraceHeapRegionRememberedSet) {
824 return _sparse_table.contains_card(hr_ind, card_index);
825 }
826 }
827
// Forward the cleanup task to the sparse table, which accumulates its
// per-cleanup bookkeeping in hrrs_cleanup_task.
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}
832
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  // Upper bound over both phases: (mutator par ids + refinement threads)
  // during mutator time, ParallelGCThreads during a GC pause.
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}
839
// Construct the remembered set for region 'hr'. _iter_state and
// _iter_claimed are explicitly initialized so a freshly built remset is
// immediately in a well-defined, claimable state for parallel iteration.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}
847
848 void HeapRegionRemSet::setup_remset_size() {
849 // Setup sparse and fine-grain tables sizes.
850 // table_size = base * (log(region_size / 1M) + 1)
851 const int LOG_M = 20;
852 int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
853 if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
854 G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
855 }
856 if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
857 G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
858 }
859 guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
860 }
861
862 bool HeapRegionRemSet::claim_iter() {
863 if (_iter_state != Unclaimed) return false;
864 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
|